From cd6487b96f14b329906f440ee1cdf30549615b39 Mon Sep 17 00:00:00 2001
From: hivyas
Date: Tue, 17 Nov 2020 14:58:05 -0800
Subject: [PATCH 1/9] added lva sdk package

---
 sdk/media/azure-media-lva-edge/CHANGELOG.md | 8 +
 sdk/media/azure-media-lva-edge/MANIFEST.in | 4 +
 sdk/media/azure-media-lva-edge/README.md | 38 +
 .../azure-media-lva-edge/azure/__init__.py | 7 +
 .../azure/media/lva/edge/__init__.py | 20 +
 .../media/lva/edge/_generated/__init__.py | 1 +
 .../media/lva/edge/_generated/_version.py | 9 +
 .../lva/edge/_generated/models/__init__.py | 199 ++
 ...r_live_video_analyticson_io_tedge_enums.py | 108 +
 .../lva/edge/_generated/models/_models.py | 2008 +++++++++++++++
 .../lva/edge/_generated/models/_models_py3.py | 2185 +++++++++++++++++
 .../azure/media/lva/edge/_generated/py.typed | 1 +
 .../azure/media/lva/edge/_version.py | 7 +
 .../azure-media-lva-edge/dev_requirements.txt | 11 +
 .../samples/sample_conditional_async.py | 48 +
 .../samples/sample_hello_world.py | 35 +
 .../samples/sample_lva.py | 83 +
 .../azure-media-lva-edge/sdk_packaging.toml | 4 +
 sdk/media/azure-media-lva-edge/setup.cfg | 2 +
 sdk/media/azure-media-lva-edge/setup.py | 102 +
 .../azure-media-lva-edge/swagger/README.md | 26 +
 .../swagger/appconfiguration.json | 1239 ++++++++++
 .../swagger/commandOutput.txt | 158 ++
 .../tests/_shared/asynctestcase.py | 79 +
 .../tests/_shared/testcase.py | 0
 .../azure-media-lva-edge/tests/conftest.py | 25 +
 .../tests/test_app_config.py | 1 +
 sdk/media/ci.yml | 3 +
 28 files changed, 6411 insertions(+)
 create mode 100644 sdk/media/azure-media-lva-edge/CHANGELOG.md
 create mode 100644 sdk/media/azure-media-lva-edge/MANIFEST.in
 create mode 100644 sdk/media/azure-media-lva-edge/README.md
 create mode 100644 sdk/media/azure-media-lva-edge/azure/__init__.py
 create mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py
 create mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/__init__.py
 create mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/_version.py
 create mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/__init__.py
 create mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py
 create mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models.py
 create mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models_py3.py
 create mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/py.typed
 create mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_version.py
 create mode 100644 sdk/media/azure-media-lva-edge/dev_requirements.txt
 create mode 100644 sdk/media/azure-media-lva-edge/samples/sample_conditional_async.py
 create mode 100644 sdk/media/azure-media-lva-edge/samples/sample_hello_world.py
 create mode 100644 sdk/media/azure-media-lva-edge/samples/sample_lva.py
 create mode 100644 sdk/media/azure-media-lva-edge/sdk_packaging.toml
 create mode 100644 sdk/media/azure-media-lva-edge/setup.cfg
 create mode 100644 sdk/media/azure-media-lva-edge/setup.py
 create mode 100644 sdk/media/azure-media-lva-edge/swagger/README.md
 create mode 100644 sdk/media/azure-media-lva-edge/swagger/appconfiguration.json
 create mode 100644 sdk/media/azure-media-lva-edge/swagger/commandOutput.txt
 create mode 100644 sdk/media/azure-media-lva-edge/tests/_shared/asynctestcase.py
 create mode 100644 sdk/media/azure-media-lva-edge/tests/_shared/testcase.py
create mode 100644 sdk/media/azure-media-lva-edge/tests/conftest.py create mode 100644 sdk/media/azure-media-lva-edge/tests/test_app_config.py diff --git a/sdk/media/azure-media-lva-edge/CHANGELOG.md b/sdk/media/azure-media-lva-edge/CHANGELOG.md new file mode 100644 index 000000000000..816f21db092e --- /dev/null +++ b/sdk/media/azure-media-lva-edge/CHANGELOG.md @@ -0,0 +1,8 @@ + +# Release History + +------------------- + +## 0.0.1 (Unreleased) + +- Training day! diff --git a/sdk/media/azure-media-lva-edge/MANIFEST.in b/sdk/media/azure-media-lva-edge/MANIFEST.in new file mode 100644 index 000000000000..7ebdd947f8ff --- /dev/null +++ b/sdk/media/azure-media-lva-edge/MANIFEST.in @@ -0,0 +1,4 @@ +recursive-include tests *.py +include *.md +include azure/__init__.py +recursive-include samples *.py *.md diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md new file mode 100644 index 000000000000..c5012d4038c9 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/README.md @@ -0,0 +1,38 @@ +# Azure App Configuration client library for Python SDK Training + +Azure App Configuration is a managed service that helps developers centralize their application configurations simply and securely. + +Modern programs, especially programs running in a cloud, generally have many components that are distributed in nature. Spreading configuration settings across these components can lead to hard-to-troubleshoot errors during an application deployment. Use App Configuration to securely store all the settings for your application in one place. + +Use the client library for App Configuration to create and manage application configuration settings. + +## Prerequisites + +* Python 2.7, or 3.5 or later is required to use this package. +* You need an [Azure subscription][azure_sub], and a [Configuration Store][configuration_store] to use this package. + +To create a Configuration Store, you can use the Azure Portal or [Azure CLI][azure_cli]. + +After that, create the Configuration Store: + +```Powershell +az appconfig create --name --resource-group --location eastus +``` + + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, +see the Code of Conduct FAQ or contact opencode@microsoft.com with any +additional questions or comments. diff --git a/sdk/media/azure-media-lva-edge/azure/__init__.py b/sdk/media/azure-media-lva-edge/azure/__init__.py new file mode 100644 index 000000000000..0e40e134bdac --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/__init__.py @@ -0,0 +1,7 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# -------------------------------------------------------------------------- + +__path__ = __import__("pkgutil").extend_path(__path__, __name__) \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py new file mode 100644 index 000000000000..725cd6860541 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py @@ -0,0 +1,20 @@ +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore +from azure.media.lva.edge._generated.models import MediaGraphTopologySetRequestBody, MediaGraphTopologySetRequest, MediaGraphInstanceSetRequest, MediaGraphInstanceSetRequestBody + +def _OverrideTopologySetRequestSerialize(self): + graph_body = MediaGraphTopologySetRequestBody(name=self.graph.name) + graph_body.system_data = self.graph.system_data + graph_body.properties = self.graph.properties + + return graph_body.serialize() + +MediaGraphTopologySetRequest.serialize = _OverrideTopologySetRequestSerialize + +def _OverrideInstanceSetRequestSerialize(self): + graph_body = MediaGraphInstanceSetRequestBody(name=self.instance.name) + graph_body.system_data = self.instance.system_data + graph_body.properties = self.instance.properties + + return graph_body.serialize() + +MediaGraphInstanceSetRequest.serialize = _OverrideInstanceSetRequestSerialize \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/__init__.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/__init__.py new file mode 100644 index 000000000000..5960c353a898 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/__init__.py @@ -0,0 +1 @@ +__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/_version.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/_version.py new file mode 100644 index 000000000000..31ed98425268 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/_version.py @@ -0,0 +1,9 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +VERSION = "1.0" diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/__init__.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/__init__.py new file mode 100644 index 000000000000..2e389ab8ef9d --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/__init__.py @@ -0,0 +1,199 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +try: + from ._models_py3 import ItemNonSetRequestBase + from ._models_py3 import MediaGraphAssetSink + from ._models_py3 import MediaGraphCertificateSource + from ._models_py3 import MediaGraphCognitiveServicesVisionExtension + from ._models_py3 import MediaGraphCredentials + from ._models_py3 import MediaGraphEndpoint + from ._models_py3 import MediaGraphExtensionProcessorBase + from ._models_py3 import MediaGraphFileSink + from ._models_py3 import MediaGraphFrameRateFilterProcessor + from ._models_py3 import MediaGraphGrpcExtension + from ._models_py3 import MediaGraphGrpcExtensionDataTransfer + from ._models_py3 import MediaGraphHttpExtension + from ._models_py3 import MediaGraphHttpHeaderCredentials + from ._models_py3 import MediaGraphImage + from ._models_py3 import MediaGraphImageFormat + from ._models_py3 import MediaGraphImageFormatEncoded + from ._models_py3 import MediaGraphImageFormatRaw + from ._models_py3 import MediaGraphImageScale + from ._models_py3 import MediaGraphInstance + from ._models_py3 import MediaGraphInstanceActivateRequest + from ._models_py3 import MediaGraphInstanceCollection + from ._models_py3 import MediaGraphInstanceDeActivateRequest + from ._models_py3 import MediaGraphInstanceDeleteRequest + from ._models_py3 import MediaGraphInstanceGetRequest + from ._models_py3 import MediaGraphInstanceListRequest + from ._models_py3 import MediaGraphInstanceProperties + from ._models_py3 import MediaGraphInstanceSetRequest + from ._models_py3 import MediaGraphInstanceSetRequestBody + from ._models_py3 import MediaGraphIoTHubMessageSink + from ._models_py3 import MediaGraphIoTHubMessageSource + from ._models_py3 import MediaGraphMotionDetectionProcessor + from ._models_py3 import MediaGraphNodeInput + from ._models_py3 import MediaGraphOutputSelector + from ._models_py3 import MediaGraphParameterDeclaration + from ._models_py3 import MediaGraphParameterDefinition + from ._models_py3 import MediaGraphPemCertificateList + from ._models_py3 import MediaGraphProcessor + from ._models_py3 import MediaGraphRtspSource + from ._models_py3 import MediaGraphSignalGateProcessor + from ._models_py3 import MediaGraphSink + from ._models_py3 import MediaGraphSource + from ._models_py3 import MediaGraphSystemData + from ._models_py3 import MediaGraphTlsEndpoint + from ._models_py3 import MediaGraphTlsValidationOptions + from ._models_py3 import MediaGraphTopology + from ._models_py3 import MediaGraphTopologyCollection + from ._models_py3 import MediaGraphTopologyDeleteRequest + from ._models_py3 import MediaGraphTopologyGetRequest + from ._models_py3 import MediaGraphTopologyListRequest + from ._models_py3 import MediaGraphTopologyProperties + from ._models_py3 import MediaGraphTopologySetRequest + from ._models_py3 import MediaGraphTopologySetRequestBody + from ._models_py3 import MediaGraphUnsecuredEndpoint + from ._models_py3 import MediaGraphUsernamePasswordCredentials + from ._models_py3 import OperationBase +except (SyntaxError, ImportError): + from ._models import ItemNonSetRequestBase # type: ignore + from ._models import MediaGraphAssetSink # type: ignore + from ._models import MediaGraphCertificateSource # type: ignore + from ._models import MediaGraphCognitiveServicesVisionExtension # type: ignore + from ._models import MediaGraphCredentials # type: ignore + from ._models import MediaGraphEndpoint # type: ignore + from ._models import MediaGraphExtensionProcessorBase # type: ignore + from 
._models import MediaGraphFileSink # type: ignore + from ._models import MediaGraphFrameRateFilterProcessor # type: ignore + from ._models import MediaGraphGrpcExtension # type: ignore + from ._models import MediaGraphGrpcExtensionDataTransfer # type: ignore + from ._models import MediaGraphHttpExtension # type: ignore + from ._models import MediaGraphHttpHeaderCredentials # type: ignore + from ._models import MediaGraphImage # type: ignore + from ._models import MediaGraphImageFormat # type: ignore + from ._models import MediaGraphImageFormatEncoded # type: ignore + from ._models import MediaGraphImageFormatRaw # type: ignore + from ._models import MediaGraphImageScale # type: ignore + from ._models import MediaGraphInstance # type: ignore + from ._models import MediaGraphInstanceActivateRequest # type: ignore + from ._models import MediaGraphInstanceCollection # type: ignore + from ._models import MediaGraphInstanceDeActivateRequest # type: ignore + from ._models import MediaGraphInstanceDeleteRequest # type: ignore + from ._models import MediaGraphInstanceGetRequest # type: ignore + from ._models import MediaGraphInstanceListRequest # type: ignore + from ._models import MediaGraphInstanceProperties # type: ignore + from ._models import MediaGraphInstanceSetRequest # type: ignore + from ._models import MediaGraphInstanceSetRequestBody # type: ignore + from ._models import MediaGraphIoTHubMessageSink # type: ignore + from ._models import MediaGraphIoTHubMessageSource # type: ignore + from ._models import MediaGraphMotionDetectionProcessor # type: ignore + from ._models import MediaGraphNodeInput # type: ignore + from ._models import MediaGraphOutputSelector # type: ignore + from ._models import MediaGraphParameterDeclaration # type: ignore + from ._models import MediaGraphParameterDefinition # type: ignore + from ._models import MediaGraphPemCertificateList # type: ignore + from ._models import MediaGraphProcessor # type: ignore + from ._models import MediaGraphRtspSource # type: ignore + from ._models import MediaGraphSignalGateProcessor # type: ignore + from ._models import MediaGraphSink # type: ignore + from ._models import MediaGraphSource # type: ignore + from ._models import MediaGraphSystemData # type: ignore + from ._models import MediaGraphTlsEndpoint # type: ignore + from ._models import MediaGraphTlsValidationOptions # type: ignore + from ._models import MediaGraphTopology # type: ignore + from ._models import MediaGraphTopologyCollection # type: ignore + from ._models import MediaGraphTopologyDeleteRequest # type: ignore + from ._models import MediaGraphTopologyGetRequest # type: ignore + from ._models import MediaGraphTopologyListRequest # type: ignore + from ._models import MediaGraphTopologyProperties # type: ignore + from ._models import MediaGraphTopologySetRequest # type: ignore + from ._models import MediaGraphTopologySetRequestBody # type: ignore + from ._models import MediaGraphUnsecuredEndpoint # type: ignore + from ._models import MediaGraphUsernamePasswordCredentials # type: ignore + from ._models import OperationBase # type: ignore + +from ._definitionsfor_live_video_analyticson_io_tedge_enums import ( + MediaGraphGrpcExtensionDataTransferMode, + MediaGraphImageEncodingFormat, + MediaGraphImageFormatRawPixelFormat, + MediaGraphImageScaleMode, + MediaGraphInstanceState, + MediaGraphMotionDetectionSensitivity, + MediaGraphOutputSelectorOperator, + MediaGraphParameterType, + MediaGraphRtspTransport, +) + +__all__ = [ + 'ItemNonSetRequestBase', + 
'MediaGraphAssetSink', + 'MediaGraphCertificateSource', + 'MediaGraphCognitiveServicesVisionExtension', + 'MediaGraphCredentials', + 'MediaGraphEndpoint', + 'MediaGraphExtensionProcessorBase', + 'MediaGraphFileSink', + 'MediaGraphFrameRateFilterProcessor', + 'MediaGraphGrpcExtension', + 'MediaGraphGrpcExtensionDataTransfer', + 'MediaGraphHttpExtension', + 'MediaGraphHttpHeaderCredentials', + 'MediaGraphImage', + 'MediaGraphImageFormat', + 'MediaGraphImageFormatEncoded', + 'MediaGraphImageFormatRaw', + 'MediaGraphImageScale', + 'MediaGraphInstance', + 'MediaGraphInstanceActivateRequest', + 'MediaGraphInstanceCollection', + 'MediaGraphInstanceDeActivateRequest', + 'MediaGraphInstanceDeleteRequest', + 'MediaGraphInstanceGetRequest', + 'MediaGraphInstanceListRequest', + 'MediaGraphInstanceProperties', + 'MediaGraphInstanceSetRequest', + 'MediaGraphInstanceSetRequestBody', + 'MediaGraphIoTHubMessageSink', + 'MediaGraphIoTHubMessageSource', + 'MediaGraphMotionDetectionProcessor', + 'MediaGraphNodeInput', + 'MediaGraphOutputSelector', + 'MediaGraphParameterDeclaration', + 'MediaGraphParameterDefinition', + 'MediaGraphPemCertificateList', + 'MediaGraphProcessor', + 'MediaGraphRtspSource', + 'MediaGraphSignalGateProcessor', + 'MediaGraphSink', + 'MediaGraphSource', + 'MediaGraphSystemData', + 'MediaGraphTlsEndpoint', + 'MediaGraphTlsValidationOptions', + 'MediaGraphTopology', + 'MediaGraphTopologyCollection', + 'MediaGraphTopologyDeleteRequest', + 'MediaGraphTopologyGetRequest', + 'MediaGraphTopologyListRequest', + 'MediaGraphTopologyProperties', + 'MediaGraphTopologySetRequest', + 'MediaGraphTopologySetRequestBody', + 'MediaGraphUnsecuredEndpoint', + 'MediaGraphUsernamePasswordCredentials', + 'OperationBase', + 'MediaGraphGrpcExtensionDataTransferMode', + 'MediaGraphImageEncodingFormat', + 'MediaGraphImageFormatRawPixelFormat', + 'MediaGraphImageScaleMode', + 'MediaGraphInstanceState', + 'MediaGraphMotionDetectionSensitivity', + 'MediaGraphOutputSelectorOperator', + 'MediaGraphParameterType', + 'MediaGraphRtspTransport', +] diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py new file mode 100644 index 000000000000..6e78e4728244 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py @@ -0,0 +1,108 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum, EnumMeta +from six import with_metaclass + +class _CaseInsensitiveEnumMeta(EnumMeta): + def __getitem__(self, name): + return super().__getitem__(name.upper()) + + def __getattr__(cls, name): + """Return the enum member matching `name` + We use __getattr__ instead of descriptors or inserting into the enum + class' __dict__ in order to support `name` and `value` being both + properties for enum members (which live in the class' __dict__) and + enum members themselves. 
+ """ + try: + return cls._member_map_[name.upper()] + except KeyError: + raise AttributeError(name) + + +class MediaGraphGrpcExtensionDataTransferMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """How frame data should be transmitted to the inferencing engine. + """ + + EMBEDDED = "Embedded" #: Frames are transferred embedded into the gRPC messages. + SHARED_MEMORY = "SharedMemory" #: Frames are transferred through shared memory. + +class MediaGraphImageEncodingFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The different encoding formats that can be used for the image. + """ + + JPEG = "Jpeg" #: JPEG image format. + BMP = "Bmp" #: BMP image format. + PNG = "Png" #: PNG image format. + +class MediaGraphImageFormatRawPixelFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """pixel format + """ + + YUV420_P = "Yuv420p" #: Planar YUV 4:2:0, 12bpp, (1 Cr and Cb sample per 2x2 Y samples). + RGB565_BE = "Rgb565be" #: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian. + RGB565_LE = "Rgb565le" #: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian. + RGB555_BE = "Rgb555be" #: Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined. + RGB555_LE = "Rgb555le" #: Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined. + RGB24 = "Rgb24" #: Packed RGB 8:8:8, 24bpp, RGBRGB. + BGR24 = "Bgr24" #: Packed RGB 8:8:8, 24bpp, BGRBGR. + ARGB = "Argb" #: Packed ARGB 8:8:8:8, 32bpp, ARGBARGB. + RGBA = "Rgba" #: Packed RGBA 8:8:8:8, 32bpp, RGBARGBA. + ABGR = "Abgr" #: Packed ABGR 8:8:8:8, 32bpp, ABGRABGR. + BGRA = "Bgra" #: Packed BGRA 8:8:8:8, 32bpp, BGRABGRA. + +class MediaGraphImageScaleMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Describes the modes for scaling an input video frame into an image, before it is sent to an + inference engine. + """ + + PRESERVE_ASPECT_RATIO = "PreserveAspectRatio" #: Use the same aspect ratio as the input frame. + PAD = "Pad" #: Center pad the input frame to match the given dimensions. + STRETCH = "Stretch" #: Stretch input frame to match given dimensions. + +class MediaGraphInstanceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Allowed states for a graph Instance. + """ + + INACTIVE = "Inactive" #: Inactive state. + ACTIVATING = "Activating" #: Activating state. + ACTIVE = "Active" #: Active state. + DEACTIVATING = "Deactivating" #: Deactivating state. + +class MediaGraphMotionDetectionSensitivity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Enumeration that specifies the sensitivity of the motion detection processor. + """ + + LOW = "Low" #: Low Sensitivity. + MEDIUM = "Medium" #: Medium Sensitivity. + HIGH = "High" #: High Sensitivity. + +class MediaGraphOutputSelectorOperator(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The operator to compare streams by. + """ + + IS_ENUM = "is" #: A media type is the same type or a subtype. + IS_NOT = "isNot" #: A media type is not the same type or a subtype. + +class MediaGraphParameterType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """name + """ + + STRING = "String" #: A string parameter value. + SECRET_STRING = "SecretString" #: A string to hold sensitive information as parameter value. + INT = "Int" #: A 32-bit signed integer as parameter value. + DOUBLE = "Double" #: A 64-bit double-precision floating point type as parameter value. + BOOL = "Bool" #: A boolean value that is either true or false. 
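The enums above are plain `str`/`Enum` subclasses whose `_CaseInsensitiveEnumMeta` metaclass uppercases lookups, and the hand-written `azure/media/lva/edge/__init__.py` earlier in this patch overrides `serialize()` on the set-request models. The following is a minimal usage sketch of the two together, not part of the generated code: the instance and topology names are hypothetical, and it assumes the package is installed as laid out in this patch and that a topology with the referenced name has already been set on the Edge module.

```python
# Minimal usage sketch (illustrative; names are hypothetical).
from azure.media.lva.edge import MediaGraphInstanceSetRequest  # importing the package applies the serialize() overrides
from azure.media.lva.edge._generated.models import (
    MediaGraphInstance,
    MediaGraphInstanceProperties,
    MediaGraphRtspTransport,
)

# _CaseInsensitiveEnumMeta uppercases the lookup key, and the str mix-in makes
# members compare equal to their wire values.
assert MediaGraphRtspTransport["tcp"] is MediaGraphRtspTransport.TCP
assert MediaGraphRtspTransport.TCP == "Tcp"

# Build a graph instance that references an already-set topology and wrap it in
# a set request. The serialize() override in azure/media/lva/edge/__init__.py
# reshapes the payload into the body expected for the GraphInstanceSet method.
instance = MediaGraphInstance(
    name="camera-01-motion",
    properties=MediaGraphInstanceProperties(topology_name="MotionDetection"),
)
request = MediaGraphInstanceSetRequest(instance=instance)
payload = request.serialize()  # dict suitable as the direct-method payload
```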
+ +class MediaGraphRtspTransport(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Underlying RTSP transport. This is used to enable or disable HTTP tunneling. + """ + + HTTP = "Http" #: HTTP/HTTPS transport. This should be used when HTTP tunneling is desired. + TCP = "Tcp" #: TCP transport. This should be used when HTTP tunneling is NOT desired. diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models.py new file mode 100644 index 000000000000..62f58c7ea385 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models.py @@ -0,0 +1,2008 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import msrest.serialization + + +class OperationBase(msrest.serialization.Model): + """OperationBase. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphInstanceListRequest, MediaGraphInstanceSetRequest, MediaGraphTopologyListRequest, MediaGraphTopologySetRequest, ItemNonSetRequestBase, MediaGraphInstanceSetRequestBody, MediaGraphTopologySetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + _subtype_map = { + 'method_name': {'GraphInstanceList': 'MediaGraphInstanceListRequest', 'GraphInstanceSet': 'MediaGraphInstanceSetRequest', 'GraphTopologyList': 'MediaGraphTopologyListRequest', 'GraphTopologySet': 'MediaGraphTopologySetRequest', 'ItemNonSetRequestBase': 'ItemNonSetRequestBase', 'MediaGraphInstanceSetRequestBody': 'MediaGraphInstanceSetRequestBody', 'MediaGraphTopologySetRequestBody': 'MediaGraphTopologySetRequestBody'} + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(OperationBase, self).__init__(**kwargs) + self.method_name = None # type: Optional[str] + + +class ItemNonSetRequestBase(OperationBase): + """ItemNonSetRequestBase. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphInstanceActivateRequest, MediaGraphInstanceDeActivateRequest, MediaGraphInstanceDeleteRequest, MediaGraphInstanceGetRequest, MediaGraphTopologyDeleteRequest, MediaGraphTopologyGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+ :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'method_name': {'GraphInstanceActivate': 'MediaGraphInstanceActivateRequest', 'GraphInstanceDeactivate': 'MediaGraphInstanceDeActivateRequest', 'GraphInstanceDelete': 'MediaGraphInstanceDeleteRequest', 'GraphInstanceGet': 'MediaGraphInstanceGetRequest', 'GraphTopologyDelete': 'MediaGraphTopologyDeleteRequest', 'GraphTopologyGet': 'MediaGraphTopologyGetRequest'} + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(ItemNonSetRequestBase, self).__init__(**kwargs) + self.method_name = 'ItemNonSetRequestBase' # type: str + self.name = kwargs['name'] + + +class MediaGraphSink(msrest.serialization.Model): + """Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphAssetSink, MediaGraphFileSink, MediaGraphIoTHubMessageSink. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphAssetSink': 'MediaGraphAssetSink', '#Microsoft.Media.MediaGraphFileSink': 'MediaGraphFileSink', '#Microsoft.Media.MediaGraphIoTHubMessageSink': 'MediaGraphIoTHubMessageSink'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphSink, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = kwargs['name'] + self.inputs = kwargs['inputs'] + + +class MediaGraphAssetSink(MediaGraphSink): + """Enables a graph to record media to an Azure Media Services asset, for subsequent playback. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param asset_name_pattern: A name pattern when creating new assets. + :type asset_name_pattern: str + :param segment_length: When writing media to an asset, wait until at least this duration of + media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum + of 30 seconds and a recommended maximum of 5 minutes. 
+ :type segment_length: ~datetime.timedelta + :param local_media_cache_path: Path to a local file system directory for temporary caching of + media, before writing to an Asset. Used when the Edge device is temporarily disconnected from + Azure. + :type local_media_cache_path: str + :param local_media_cache_maximum_size_mi_b: Maximum amount of disk space that can be used for + temporary caching of media. + :type local_media_cache_maximum_size_mi_b: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'asset_name_pattern': {'key': 'assetNamePattern', 'type': 'str'}, + 'segment_length': {'key': 'segmentLength', 'type': 'duration'}, + 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'}, + 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphAssetSink, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphAssetSink' # type: str + self.asset_name_pattern = kwargs.get('asset_name_pattern', None) + self.segment_length = kwargs.get('segment_length', None) + self.local_media_cache_path = kwargs.get('local_media_cache_path', None) + self.local_media_cache_maximum_size_mi_b = kwargs.get('local_media_cache_maximum_size_mi_b', None) + + +class MediaGraphCertificateSource(msrest.serialization.Model): + """Base class for certificate sources. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphPemCertificateList. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphPemCertificateList': 'MediaGraphPemCertificateList'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphCertificateSource, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphProcessor(msrest.serialization.Model): + """A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphFrameRateFilterProcessor, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. 
+ :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphFrameRateFilterProcessor': 'MediaGraphFrameRateFilterProcessor', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphProcessor, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = kwargs['name'] + self.inputs = kwargs['inputs'] + + +class MediaGraphExtensionProcessorBase(MediaGraphProcessor): + """Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphCognitiveServicesVisionExtension, MediaGraphGrpcExtension, MediaGraphHttpExtension. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension': 'MediaGraphCognitiveServicesVisionExtension', '#Microsoft.Media.MediaGraphGrpcExtension': 'MediaGraphGrpcExtension', '#Microsoft.Media.MediaGraphHttpExtension': 'MediaGraphHttpExtension'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphExtensionProcessorBase, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphExtensionProcessorBase' # type: str + self.endpoint = kwargs.get('endpoint', None) + self.image = kwargs.get('image', None) + + +class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. 
The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphCognitiveServicesVisionExtension, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension' # type: str + + +class MediaGraphCredentials(msrest.serialization.Model): + """Credentials to present during authentication. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphHttpHeaderCredentials, MediaGraphUsernamePasswordCredentials. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphHttpHeaderCredentials': 'MediaGraphHttpHeaderCredentials', '#Microsoft.Media.MediaGraphUsernamePasswordCredentials': 'MediaGraphUsernamePasswordCredentials'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphCredentials, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphEndpoint(msrest.serialization.Model): + """Base class for endpoints. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphTlsEndpoint, MediaGraphUnsecuredEndpoint. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. 
+ :type url: str + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphTlsEndpoint': 'MediaGraphTlsEndpoint', '#Microsoft.Media.MediaGraphUnsecuredEndpoint': 'MediaGraphUnsecuredEndpoint'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphEndpoint, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.credentials = kwargs.get('credentials', None) + self.url = kwargs['url'] + + +class MediaGraphFileSink(MediaGraphSink): + """Enables a media graph to write/store media (video and audio) to a file on the Edge device. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param file_path_pattern: Required. Absolute file path pattern for creating new files on the + Edge device. + :type file_path_pattern: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + 'file_path_pattern': {'required': True, 'min_length': 1}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'file_path_pattern': {'key': 'filePathPattern', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphFileSink, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphFileSink' # type: str + self.file_path_pattern = kwargs['file_path_pattern'] + + +class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): + """Limits the frame rate on the input video stream based on the maximumFps property. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not + exceed this limit. 
+ :type maximum_fps: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'maximum_fps': {'key': 'maximumFps', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphFrameRateFilterProcessor, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphFrameRateFilterProcessor' # type: str + self.maximum_fps = kwargs.get('maximum_fps', None) + + +class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + :param data_transfer: Required. How media should be transferred to the inferencing engine. + :type data_transfer: ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransfer + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + 'data_transfer': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'data_transfer': {'key': 'dataTransfer', 'type': 'MediaGraphGrpcExtensionDataTransfer'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphGrpcExtension, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphGrpcExtension' # type: str + self.data_transfer = kwargs['data_transfer'] + + +class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): + """Describes how media should be transferred to the inferencing engine. + + All required parameters must be populated in order to send to Azure. + + :param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if + mode is SharedMemory. Should not be specificed otherwise. + :type shared_memory_size_mi_b: str + :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible + values include: "Embedded", "SharedMemory". 
+ :type mode: str or ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransferMode + """ + + _validation = { + 'mode': {'required': True}, + } + + _attribute_map = { + 'shared_memory_size_mi_b': {'key': 'sharedMemorySizeMiB', 'type': 'str'}, + 'mode': {'key': 'mode', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphGrpcExtensionDataTransfer, self).__init__(**kwargs) + self.shared_memory_size_mi_b = kwargs.get('shared_memory_size_mi_b', None) + self.mode = kwargs['mode'] + + +class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphHttpExtension, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphHttpExtension' # type: str + + +class MediaGraphHttpHeaderCredentials(MediaGraphCredentials): + """Http header service credentials. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param header_name: Required. HTTP header name. + :type header_name: str + :param header_value: Required. HTTP header value. + :type header_value: str + """ + + _validation = { + 'type': {'required': True}, + 'header_name': {'required': True}, + 'header_value': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'header_name': {'key': 'headerName', 'type': 'str'}, + 'header_value': {'key': 'headerValue', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphHttpHeaderCredentials, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphHttpHeaderCredentials' # type: str + self.header_name = kwargs['header_name'] + self.header_value = kwargs['header_value'] + + +class MediaGraphImage(msrest.serialization.Model): + """Describes the properties of an image frame. + + :param scale: The scaling mode for the image. + :type scale: ~azure.media.lva.edge.models.MediaGraphImageScale + :param format: Encoding settings for an image. 
+ :type format: ~azure.media.lva.edge.models.MediaGraphImageFormat + """ + + _attribute_map = { + 'scale': {'key': 'scale', 'type': 'MediaGraphImageScale'}, + 'format': {'key': 'format', 'type': 'MediaGraphImageFormat'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImage, self).__init__(**kwargs) + self.scale = kwargs.get('scale', None) + self.format = kwargs.get('format', None) + + +class MediaGraphImageFormat(msrest.serialization.Model): + """Encoding settings for an image. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphImageFormatEncoded, MediaGraphImageFormatRaw. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphImageFormatEncoded': 'MediaGraphImageFormatEncoded', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormat, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphImageFormatEncoded(MediaGraphImageFormat): + """Allowed formats for the image. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param encoding: The different encoding formats that can be used for the image. Possible values + include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". + :type encoding: str or ~azure.media.lva.edge.models.MediaGraphImageEncodingFormat + :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best + quality). + :type quality: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'encoding': {'key': 'encoding', 'type': 'str'}, + 'quality': {'key': 'quality', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormatEncoded, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatEncoded' # type: str + self.encoding = kwargs.get('encoding', "Jpeg") + self.quality = kwargs.get('quality', None) + + +class MediaGraphImageFormatRaw(MediaGraphImageFormat): + """Encoding settings for raw images. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", + "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". + :type pixel_format: str or ~azure.media.lva.edge.models.MediaGraphImageFormatRawPixelFormat + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'pixel_format': {'key': 'pixelFormat', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormatRaw, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatRaw' # type: str + self.pixel_format = kwargs.get('pixel_format', None) + + +class MediaGraphImageScale(msrest.serialization.Model): + """The scaling mode for the image. 
+ + :param mode: Describes the modes for scaling an input video frame into an image, before it is + sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". + :type mode: str or ~azure.media.lva.edge.models.MediaGraphImageScaleMode + :param width: The desired output width of the image. + :type width: str + :param height: The desired output height of the image. + :type height: str + """ + + _attribute_map = { + 'mode': {'key': 'mode', 'type': 'str'}, + 'width': {'key': 'width', 'type': 'str'}, + 'height': {'key': 'height', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageScale, self).__init__(**kwargs) + self.mode = kwargs.get('mode', None) + self.width = kwargs.get('width', None) + self.height = kwargs.get('height', None) + + +class MediaGraphInstance(msrest.serialization.Model): + """Represents a Media Graph instance. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Properties of a Media Graph instance. + :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstance, self).__init__(**kwargs) + self.name = kwargs['name'] + self.system_data = kwargs.get('system_data', None) + self.properties = kwargs.get('properties', None) + + +class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): + """MediaGraphInstanceActivateRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceActivateRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceActivate' # type: str + + +class MediaGraphInstanceCollection(msrest.serialization.Model): + """Collection of graph instances. + + :param value: Collection of graph instances. + :type value: list[~azure.media.lva.edge.models.MediaGraphInstance] + :param continuation_token: Continuation token to use in subsequent calls to enumerate through + the graph instance collection (when the collection contains too many results to return in one + response). 
+ :type continuation_token: str + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[MediaGraphInstance]'}, + 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceCollection, self).__init__(**kwargs) + self.value = kwargs.get('value', None) + self.continuation_token = kwargs.get('continuation_token', None) + + +class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): + """MediaGraphInstanceDeActivateRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceDeActivateRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceDeactivate' # type: str + + +class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): + """MediaGraphInstanceDeleteRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceDeleteRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceDelete' # type: str + + +class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): + """MediaGraphInstanceGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceGetRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceGet' # type: str + + +class MediaGraphInstanceListRequest(OperationBase): + """MediaGraphInstanceListRequest. 
+ + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceListRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceList' # type: str + + +class MediaGraphInstanceProperties(msrest.serialization.Model): + """Properties of a Media Graph instance. + + :param description: An optional description for the instance. + :type description: str + :param topology_name: The name of the graph topology that this instance will run. A topology + with this name should already have been set in the Edge module. + :type topology_name: str + :param parameters: List of one or more graph instance parameters. + :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDefinition] + :param state: Allowed states for a graph Instance. Possible values include: "Inactive", + "Activating", "Active", "Deactivating". + :type state: str or ~azure.media.lva.edge.models.MediaGraphInstanceState + """ + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + 'topology_name': {'key': 'topologyName', 'type': 'str'}, + 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDefinition]'}, + 'state': {'key': 'state', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceProperties, self).__init__(**kwargs) + self.description = kwargs.get('description', None) + self.topology_name = kwargs.get('topology_name', None) + self.parameters = kwargs.get('parameters', None) + self.state = kwargs.get('state', None) + + +class MediaGraphInstanceSetRequest(OperationBase): + """MediaGraphInstanceSetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param instance: Required. Represents a Media Graph instance. + :type instance: ~azure.media.lva.edge.models.MediaGraphInstance + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'instance': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'instance': {'key': 'instance', 'type': 'MediaGraphInstance'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceSetRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceSet' # type: str + self.instance = kwargs['instance'] + + +class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): + """MediaGraphInstanceSetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. 
Default value: "1.0". + :vartype api_version: str + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Properties of a Media Graph instance. + :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceSetRequestBody, self).__init__(**kwargs) + self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str + self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str + self.name = kwargs['name'] + self.system_data = kwargs.get('system_data', None) + self.properties = kwargs.get('properties', None) + + +class MediaGraphIoTHubMessageSink(MediaGraphSink): + """Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param hub_output_name: Name of the output path to which the graph will publish message. These + messages can then be delivered to desired destinations by declaring routes referencing the + output path in the IoT Edge deployment manifest. + :type hub_output_name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'hub_output_name': {'key': 'hubOutputName', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphIoTHubMessageSink, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSink' # type: str + self.hub_output_name = kwargs.get('hub_output_name', None) + + +class MediaGraphSource(msrest.serialization.Model): + """Media graph source. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphIoTHubMessageSource, MediaGraphRtspSource. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. 
+ :type name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphIoTHubMessageSource': 'MediaGraphIoTHubMessageSource', '#Microsoft.Media.MediaGraphRtspSource': 'MediaGraphRtspSource'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphSource, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = kwargs['name'] + + +class MediaGraphIoTHubMessageSource(MediaGraphSource): + """Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. + :type name: str + :param hub_input_name: Name of the input path where messages can be routed to (via routes + declared in the IoT Edge deployment manifest). + :type hub_input_name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'hub_input_name': {'key': 'hubInputName', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphIoTHubMessageSource, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSource' # type: str + self.hub_input_name = kwargs.get('hub_input_name', None) + + +class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): + """A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param sensitivity: Enumeration that specifies the sensitivity of the motion detection + processor. Possible values include: "Low", "Medium", "High". + :type sensitivity: str or ~azure.media.lva.edge.models.MediaGraphMotionDetectionSensitivity + :param output_motion_region: Indicates whether the processor should detect and output the + regions, within the video frame, where motion was detected. Default is true. 
+    :type output_motion_region: bool
+    """
+
+    _validation = {
+        'type': {'required': True},
+        'name': {'required': True},
+        'inputs': {'required': True},
+    }
+
+    _attribute_map = {
+        'type': {'key': '@type', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+        'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'},
+        'sensitivity': {'key': 'sensitivity', 'type': 'str'},
+        'output_motion_region': {'key': 'outputMotionRegion', 'type': 'bool'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(MediaGraphMotionDetectionProcessor, self).__init__(**kwargs)
+        self.type = '#Microsoft.Media.MediaGraphMotionDetectionProcessor'  # type: str
+        self.sensitivity = kwargs.get('sensitivity', None)
+        self.output_motion_region = kwargs.get('output_motion_region', None)
+
+
+class MediaGraphNodeInput(msrest.serialization.Model):
+    """Represents the input to any node in a media graph.
+
+    :param node_name: The name of another node in the media graph, the output of which is used as
+     input to this node.
+    :type node_name: str
+    :param output_selectors: Allows for the selection of particular streams from another node.
+    :type output_selectors: list[~azure.media.lva.edge.models.MediaGraphOutputSelector]
+    """
+
+    _attribute_map = {
+        'node_name': {'key': 'nodeName', 'type': 'str'},
+        'output_selectors': {'key': 'outputSelectors', 'type': '[MediaGraphOutputSelector]'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(MediaGraphNodeInput, self).__init__(**kwargs)
+        self.node_name = kwargs.get('node_name', None)
+        self.output_selectors = kwargs.get('output_selectors', None)
+
+
+class MediaGraphOutputSelector(msrest.serialization.Model):
+    """Allows for the selection of particular streams from another node.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar property: The stream property to compare with. Default value: "mediaType".
+    :vartype property: str
+    :param operator: The operator to compare streams by. Possible values include: "is", "isNot".
+    :type operator: str or ~azure.media.lva.edge.models.MediaGraphOutputSelectorOperator
+    :param value: Value to compare against.
+    :type value: str
+    """
+
+    _validation = {
+        'property': {'constant': True},
+    }
+
+    _attribute_map = {
+        'property': {'key': 'property', 'type': 'str'},
+        'operator': {'key': 'operator', 'type': 'str'},
+        'value': {'key': 'value', 'type': 'str'},
+    }
+
+    property = "mediaType"
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(MediaGraphOutputSelector, self).__init__(**kwargs)
+        self.operator = kwargs.get('operator', None)
+        self.value = kwargs.get('value', None)
+
+
+class MediaGraphParameterDeclaration(msrest.serialization.Model):
+    """The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param name: Required. The name of the parameter.
+    :type name: str
+    :param type: Required. The type of the parameter. Possible values include: "String",
+     "SecretString", "Int", "Double", "Bool".
+    :type type: str or ~azure.media.lva.edge.models.MediaGraphParameterType
+    :param description: Description of the parameter.
+    :type description: str
+    :param default: The default value for the parameter, to be used if the graph instance does not
+     specify a value.
+ :type default: str + """ + + _validation = { + 'name': {'required': True, 'max_length': 64, 'min_length': 0}, + 'type': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'default': {'key': 'default', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphParameterDeclaration, self).__init__(**kwargs) + self.name = kwargs['name'] + self.type = kwargs['type'] + self.description = kwargs.get('description', None) + self.default = kwargs.get('default', None) + + +class MediaGraphParameterDefinition(msrest.serialization.Model): + """A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. Name of parameter as defined in the graph topology. + :type name: str + :param value: Required. Value of parameter. + :type value: str + """ + + _validation = { + 'name': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphParameterDefinition, self).__init__(**kwargs) + self.name = kwargs['name'] + self.value = kwargs['value'] + + +class MediaGraphPemCertificateList(MediaGraphCertificateSource): + """A list of PEM formatted certificates. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param certificates: Required. PEM formatted public certificates one per entry. + :type certificates: list[str] + """ + + _validation = { + 'type': {'required': True}, + 'certificates': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'certificates': {'key': 'certificates', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphPemCertificateList, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphPemCertificateList' # type: str + self.certificates = kwargs['certificates'] + + +class MediaGraphRtspSource(MediaGraphSource): + """Enables a graph to capture media from a RTSP server. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. + :type name: str + :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. + Possible values include: "Http", "Tcp". + :type transport: str or ~azure.media.lva.edge.models.MediaGraphRtspTransport + :param endpoint: Required. RTSP endpoint of the stream that is being connected to. 
+ :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'endpoint': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'transport': {'key': 'transport', 'type': 'str'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphRtspSource, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphRtspSource' # type: str + self.transport = kwargs.get('transport', None) + self.endpoint = kwargs['endpoint'] + + +class MediaGraphSignalGateProcessor(MediaGraphProcessor): + """A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param activation_evaluation_window: The period of time over which the gate gathers input + events, before evaluating them. + :type activation_evaluation_window: str + :param activation_signal_offset: Signal offset once the gate is activated (can be negative). It + is an offset between the time the event is received, and the timestamp of the first media + sample (eg. video frame) that is allowed through by the gate. + :type activation_signal_offset: str + :param minimum_activation_time: The minimum period for which the gate remains open, in the + absence of subsequent triggers (events). + :type minimum_activation_time: str + :param maximum_activation_time: The maximum period for which the gate remains open, in the + presence of subsequent events. + :type maximum_activation_time: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'activation_evaluation_window': {'key': 'activationEvaluationWindow', 'type': 'str'}, + 'activation_signal_offset': {'key': 'activationSignalOffset', 'type': 'str'}, + 'minimum_activation_time': {'key': 'minimumActivationTime', 'type': 'str'}, + 'maximum_activation_time': {'key': 'maximumActivationTime', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphSignalGateProcessor, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphSignalGateProcessor' # type: str + self.activation_evaluation_window = kwargs.get('activation_evaluation_window', None) + self.activation_signal_offset = kwargs.get('activation_signal_offset', None) + self.minimum_activation_time = kwargs.get('minimum_activation_time', None) + self.maximum_activation_time = kwargs.get('maximum_activation_time', None) + + +class MediaGraphSystemData(msrest.serialization.Model): + """Graph system data. + + :param created_at: The timestamp of resource creation (UTC). 
+ :type created_at: ~datetime.datetime + :param last_modified_at: The timestamp of resource last modification (UTC). + :type last_modified_at: ~datetime.datetime + """ + + _attribute_map = { + 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, + 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphSystemData, self).__init__(**kwargs) + self.created_at = kwargs.get('created_at', None) + self.last_modified_at = kwargs.get('last_modified_at', None) + + +class MediaGraphTlsEndpoint(MediaGraphEndpoint): + """An endpoint that the graph can connect to, which must be connected over TLS/SSL. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. + :type url: str + :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null + designates that Azure Media Service's source of trust should be used. + :type trusted_certificates: ~azure.media.lva.edge.models.MediaGraphCertificateSource + :param validation_options: Validation options to use when authenticating a TLS connection. By + default, strict validation is used. + :type validation_options: ~azure.media.lva.edge.models.MediaGraphTlsValidationOptions + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'MediaGraphCertificateSource'}, + 'validation_options': {'key': 'validationOptions', 'type': 'MediaGraphTlsValidationOptions'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphTlsEndpoint, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphTlsEndpoint' # type: str + self.trusted_certificates = kwargs.get('trusted_certificates', None) + self.validation_options = kwargs.get('validation_options', None) + + +class MediaGraphTlsValidationOptions(msrest.serialization.Model): + """Options for controlling the authentication of TLS endpoints. + + :param ignore_hostname: Boolean value ignoring the host name (common name) during validation. + :type ignore_hostname: str + :param ignore_signature: Boolean value ignoring the integrity of the certificate chain at the + current time. + :type ignore_signature: str + """ + + _attribute_map = { + 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, + 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphTlsValidationOptions, self).__init__(**kwargs) + self.ignore_hostname = kwargs.get('ignore_hostname', None) + self.ignore_signature = kwargs.get('ignore_signature', None) + + +class MediaGraphTopology(msrest.serialization.Model): + """Describes a graph topology. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Describes the properties of a graph topology. 
+ :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopology, self).__init__(**kwargs) + self.name = kwargs['name'] + self.system_data = kwargs.get('system_data', None) + self.properties = kwargs.get('properties', None) + + +class MediaGraphTopologyCollection(msrest.serialization.Model): + """Collection of graph topologies. + + :param value: Collection of graph topologies. + :type value: list[~azure.media.lva.edge.models.MediaGraphTopology] + :param continuation_token: Continuation token to use in subsequent calls to enumerate through + the graph topologies collection (when the collection contains too many results to return in one + response). + :type continuation_token: str + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[MediaGraphTopology]'}, + 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopologyCollection, self).__init__(**kwargs) + self.value = kwargs.get('value', None) + self.continuation_token = kwargs.get('continuation_token', None) + + +class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): + """MediaGraphTopologyDeleteRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopologyDeleteRequest, self).__init__(**kwargs) + self.method_name = 'GraphTopologyDelete' # type: str + + +class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): + """MediaGraphTopologyGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopologyGetRequest, self).__init__(**kwargs) + self.method_name = 'GraphTopologyGet' # type: str + + +class MediaGraphTopologyListRequest(OperationBase): + """MediaGraphTopologyListRequest. 
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar method_name: method name.Constant filled by server.
+    :vartype method_name: str
+    :ivar api_version: api version. Default value: "1.0".
+    :vartype api_version: str
+    """
+
+    _validation = {
+        'method_name': {'readonly': True},
+        'api_version': {'constant': True},
+    }
+
+    _attribute_map = {
+        'method_name': {'key': 'methodName', 'type': 'str'},
+        'api_version': {'key': '@apiVersion', 'type': 'str'},
+    }
+
+    api_version = "1.0"
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(MediaGraphTopologyListRequest, self).__init__(**kwargs)
+        self.method_name = 'GraphTopologyList'  # type: str
+
+
+class MediaGraphTopologyProperties(msrest.serialization.Model):
+    """Describes the properties of a graph topology.
+
+    :param description: An optional description for the graph topology.
+    :type description: str
+    :param parameters: The list of parameters defined in the topology. Values for these
+     parameters can be supplied when a graph instance is created.
+    :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDeclaration]
+    :param sources: The list of source nodes in this topology.
+    :type sources: list[~azure.media.lva.edge.models.MediaGraphSource]
+    :param processors: The list of processor nodes in this topology.
+    :type processors: list[~azure.media.lva.edge.models.MediaGraphProcessor]
+    :param sinks: The list of sink nodes in this topology.
+    :type sinks: list[~azure.media.lva.edge.models.MediaGraphSink]
+    """
+
+    _attribute_map = {
+        'description': {'key': 'description', 'type': 'str'},
+        'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDeclaration]'},
+        'sources': {'key': 'sources', 'type': '[MediaGraphSource]'},
+        'processors': {'key': 'processors', 'type': '[MediaGraphProcessor]'},
+        'sinks': {'key': 'sinks', 'type': '[MediaGraphSink]'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(MediaGraphTopologyProperties, self).__init__(**kwargs)
+        self.description = kwargs.get('description', None)
+        self.parameters = kwargs.get('parameters', None)
+        self.sources = kwargs.get('sources', None)
+        self.processors = kwargs.get('processors', None)
+        self.sinks = kwargs.get('sinks', None)
+
+
+class MediaGraphTopologySetRequest(OperationBase):
+    """MediaGraphTopologySetRequest.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar method_name: method name.Constant filled by server.
+    :vartype method_name: str
+    :ivar api_version: api version. Default value: "1.0".
+    :vartype api_version: str
+    :param graph: Required. Describes a graph topology.
+    :type graph: ~azure.media.lva.edge.models.MediaGraphTopology
+    """
+
+    _validation = {
+        'method_name': {'readonly': True},
+        'api_version': {'constant': True},
+        'graph': {'required': True},
+    }
+
+    _attribute_map = {
+        'method_name': {'key': 'methodName', 'type': 'str'},
+        'api_version': {'key': '@apiVersion', 'type': 'str'},
+        'graph': {'key': 'graph', 'type': 'MediaGraphTopology'},
+    }
+
+    api_version = "1.0"
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(MediaGraphTopologySetRequest, self).__init__(**kwargs)
+        self.method_name = 'GraphTopologySet'  # type: str
+        self.graph = kwargs['graph']
+
+
+class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase):
+    """MediaGraphTopologySetRequestBody.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar method_name: method name.Constant filled by server.
+ :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Describes the properties of a graph topology. + :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopologySetRequestBody, self).__init__(**kwargs) + self.method_name = 'MediaGraphTopologySetRequestBody' # type: str + self.method_name = 'MediaGraphTopologySetRequestBody' # type: str + self.name = kwargs['name'] + self.system_data = kwargs.get('system_data', None) + self.properties = kwargs.get('properties', None) + + +class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint): + """An endpoint that the media graph can connect to, with no encryption in transit. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. + :type url: str + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphUnsecuredEndpoint, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphUnsecuredEndpoint' # type: str + + +class MediaGraphUsernamePasswordCredentials(MediaGraphCredentials): + """Username/password credential pair. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param username: Required. Username for a username/password pair. + :type username: str + :param password: Password for a username/password pair. 
+ :type password: str + """ + + _validation = { + 'type': {'required': True}, + 'username': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'username': {'key': 'username', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphUsernamePasswordCredentials, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphUsernamePasswordCredentials' # type: str + self.username = kwargs['username'] + self.password = kwargs.get('password', None) diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models_py3.py new file mode 100644 index 000000000000..5de3adde8e11 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models_py3.py @@ -0,0 +1,2185 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import datetime +from typing import List, Optional, Union + +import msrest.serialization + +from ._definitionsfor_live_video_analyticson_io_tedge_enums import * + + +class OperationBase(msrest.serialization.Model): + """OperationBase. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphInstanceListRequest, MediaGraphInstanceSetRequest, MediaGraphTopologyListRequest, MediaGraphTopologySetRequest, ItemNonSetRequestBase, MediaGraphInstanceSetRequestBody, MediaGraphTopologySetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + _subtype_map = { + 'method_name': {'GraphInstanceList': 'MediaGraphInstanceListRequest', 'GraphInstanceSet': 'MediaGraphInstanceSetRequest', 'GraphTopologyList': 'MediaGraphTopologyListRequest', 'GraphTopologySet': 'MediaGraphTopologySetRequest', 'ItemNonSetRequestBase': 'ItemNonSetRequestBase', 'MediaGraphInstanceSetRequestBody': 'MediaGraphInstanceSetRequestBody', 'MediaGraphTopologySetRequestBody': 'MediaGraphTopologySetRequestBody'} + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(OperationBase, self).__init__(**kwargs) + self.method_name = None # type: Optional[str] + + +class ItemNonSetRequestBase(OperationBase): + """ItemNonSetRequestBase. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphInstanceActivateRequest, MediaGraphInstanceDeActivateRequest, MediaGraphInstanceDeleteRequest, MediaGraphInstanceGetRequest, MediaGraphTopologyDeleteRequest, MediaGraphTopologyGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. 
+ + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'method_name': {'GraphInstanceActivate': 'MediaGraphInstanceActivateRequest', 'GraphInstanceDeactivate': 'MediaGraphInstanceDeActivateRequest', 'GraphInstanceDelete': 'MediaGraphInstanceDeleteRequest', 'GraphInstanceGet': 'MediaGraphInstanceGetRequest', 'GraphTopologyDelete': 'MediaGraphTopologyDeleteRequest', 'GraphTopologyGet': 'MediaGraphTopologyGetRequest'} + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(ItemNonSetRequestBase, self).__init__(**kwargs) + self.method_name = 'ItemNonSetRequestBase' # type: str + self.name = name + + +class MediaGraphSink(msrest.serialization.Model): + """Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphAssetSink, MediaGraphFileSink, MediaGraphIoTHubMessageSink. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphAssetSink': 'MediaGraphAssetSink', '#Microsoft.Media.MediaGraphFileSink': 'MediaGraphFileSink', '#Microsoft.Media.MediaGraphIoTHubMessageSink': 'MediaGraphIoTHubMessageSink'} + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + **kwargs + ): + super(MediaGraphSink, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = name + self.inputs = inputs + + +class MediaGraphAssetSink(MediaGraphSink): + """Enables a graph to record media to an Azure Media Services asset, for subsequent playback. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param asset_name_pattern: A name pattern when creating new assets. 
+ :type asset_name_pattern: str + :param segment_length: When writing media to an asset, wait until at least this duration of + media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum + of 30 seconds and a recommended maximum of 5 minutes. + :type segment_length: ~datetime.timedelta + :param local_media_cache_path: Path to a local file system directory for temporary caching of + media, before writing to an Asset. Used when the Edge device is temporarily disconnected from + Azure. + :type local_media_cache_path: str + :param local_media_cache_maximum_size_mi_b: Maximum amount of disk space that can be used for + temporary caching of media. + :type local_media_cache_maximum_size_mi_b: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'asset_name_pattern': {'key': 'assetNamePattern', 'type': 'str'}, + 'segment_length': {'key': 'segmentLength', 'type': 'duration'}, + 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'}, + 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + asset_name_pattern: Optional[str] = None, + segment_length: Optional[datetime.timedelta] = None, + local_media_cache_path: Optional[str] = None, + local_media_cache_maximum_size_mi_b: Optional[str] = None, + **kwargs + ): + super(MediaGraphAssetSink, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphAssetSink' # type: str + self.asset_name_pattern = asset_name_pattern + self.segment_length = segment_length + self.local_media_cache_path = local_media_cache_path + self.local_media_cache_maximum_size_mi_b = local_media_cache_maximum_size_mi_b + + +class MediaGraphCertificateSource(msrest.serialization.Model): + """Base class for certificate sources. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphPemCertificateList. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphPemCertificateList': 'MediaGraphPemCertificateList'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphCertificateSource, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphProcessor(msrest.serialization.Model): + """A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphFrameRateFilterProcessor, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. 
An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphFrameRateFilterProcessor': 'MediaGraphFrameRateFilterProcessor', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + **kwargs + ): + super(MediaGraphProcessor, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = name + self.inputs = inputs + + +class MediaGraphExtensionProcessorBase(MediaGraphProcessor): + """Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphCognitiveServicesVisionExtension, MediaGraphGrpcExtension, MediaGraphHttpExtension. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. 
+ :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension': 'MediaGraphCognitiveServicesVisionExtension', '#Microsoft.Media.MediaGraphGrpcExtension': 'MediaGraphGrpcExtension', '#Microsoft.Media.MediaGraphHttpExtension': 'MediaGraphHttpExtension'} + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + endpoint: Optional["MediaGraphEndpoint"] = None, + image: Optional["MediaGraphImage"] = None, + **kwargs + ): + super(MediaGraphExtensionProcessorBase, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphExtensionProcessorBase' # type: str + self.endpoint = endpoint + self.image = image + + +class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + endpoint: Optional["MediaGraphEndpoint"] = None, + image: Optional["MediaGraphImage"] = None, + **kwargs + ): + super(MediaGraphCognitiveServicesVisionExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) + self.type = '#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension' # type: str + + +class MediaGraphCredentials(msrest.serialization.Model): + """Credentials to present during authentication. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphHttpHeaderCredentials, MediaGraphUsernamePasswordCredentials. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. 
+ :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphHttpHeaderCredentials': 'MediaGraphHttpHeaderCredentials', '#Microsoft.Media.MediaGraphUsernamePasswordCredentials': 'MediaGraphUsernamePasswordCredentials'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphCredentials, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphEndpoint(msrest.serialization.Model): + """Base class for endpoints. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphTlsEndpoint, MediaGraphUnsecuredEndpoint. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. + :type url: str + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphTlsEndpoint': 'MediaGraphTlsEndpoint', '#Microsoft.Media.MediaGraphUnsecuredEndpoint': 'MediaGraphUnsecuredEndpoint'} + } + + def __init__( + self, + *, + url: str, + credentials: Optional["MediaGraphCredentials"] = None, + **kwargs + ): + super(MediaGraphEndpoint, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.credentials = credentials + self.url = url + + +class MediaGraphFileSink(MediaGraphSink): + """Enables a media graph to write/store media (video and audio) to a file on the Edge device. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param file_path_pattern: Required. Absolute file path pattern for creating new files on the + Edge device. + :type file_path_pattern: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + 'file_path_pattern': {'required': True, 'min_length': 1}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'file_path_pattern': {'key': 'filePathPattern', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + file_path_pattern: str, + **kwargs + ): + super(MediaGraphFileSink, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphFileSink' # type: str + self.file_path_pattern = file_path_pattern + + +class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): + """Limits the frame rate on the input video stream based on the maximumFps property. 
+ + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not + exceed this limit. + :type maximum_fps: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'maximum_fps': {'key': 'maximumFps', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + maximum_fps: Optional[str] = None, + **kwargs + ): + super(MediaGraphFrameRateFilterProcessor, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphFrameRateFilterProcessor' # type: str + self.maximum_fps = maximum_fps + + +class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. + :type image: ~azure.media.lva.edge.models.MediaGraphImage + :param data_transfer: Required. How media should be transferred to the inferencing engine. 
+ :type data_transfer: ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransfer + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + 'data_transfer': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + 'data_transfer': {'key': 'dataTransfer', 'type': 'MediaGraphGrpcExtensionDataTransfer'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + data_transfer: "MediaGraphGrpcExtensionDataTransfer", + endpoint: Optional["MediaGraphEndpoint"] = None, + image: Optional["MediaGraphImage"] = None, + **kwargs + ): + super(MediaGraphGrpcExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) + self.type = '#Microsoft.Media.MediaGraphGrpcExtension' # type: str + self.data_transfer = data_transfer + + +class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): + """Describes how media should be transferred to the inferencing engine. + + All required parameters must be populated in order to send to Azure. + + :param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if + mode is SharedMemory. Should not be specificed otherwise. + :type shared_memory_size_mi_b: str + :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible + values include: "Embedded", "SharedMemory". + :type mode: str or ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransferMode + """ + + _validation = { + 'mode': {'required': True}, + } + + _attribute_map = { + 'shared_memory_size_mi_b': {'key': 'sharedMemorySizeMiB', 'type': 'str'}, + 'mode': {'key': 'mode', 'type': 'str'}, + } + + def __init__( + self, + *, + mode: Union[str, "MediaGraphGrpcExtensionDataTransferMode"], + shared_memory_size_mi_b: Optional[str] = None, + **kwargs + ): + super(MediaGraphGrpcExtensionDataTransfer, self).__init__(**kwargs) + self.shared_memory_size_mi_b = shared_memory_size_mi_b + self.mode = mode + + +class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): + """A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param endpoint: Endpoint to which this processor should connect. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + :param image: Describes the parameters of the image that is sent as input to the endpoint. 
+ :type image: ~azure.media.lva.edge.models.MediaGraphImage + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + 'image': {'key': 'image', 'type': 'MediaGraphImage'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + endpoint: Optional["MediaGraphEndpoint"] = None, + image: Optional["MediaGraphImage"] = None, + **kwargs + ): + super(MediaGraphHttpExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) + self.type = '#Microsoft.Media.MediaGraphHttpExtension' # type: str + + +class MediaGraphHttpHeaderCredentials(MediaGraphCredentials): + """Http header service credentials. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param header_name: Required. HTTP header name. + :type header_name: str + :param header_value: Required. HTTP header value. + :type header_value: str + """ + + _validation = { + 'type': {'required': True}, + 'header_name': {'required': True}, + 'header_value': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'header_name': {'key': 'headerName', 'type': 'str'}, + 'header_value': {'key': 'headerValue', 'type': 'str'}, + } + + def __init__( + self, + *, + header_name: str, + header_value: str, + **kwargs + ): + super(MediaGraphHttpHeaderCredentials, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphHttpHeaderCredentials' # type: str + self.header_name = header_name + self.header_value = header_value + + +class MediaGraphImage(msrest.serialization.Model): + """Describes the properties of an image frame. + + :param scale: The scaling mode for the image. + :type scale: ~azure.media.lva.edge.models.MediaGraphImageScale + :param format: Encoding settings for an image. + :type format: ~azure.media.lva.edge.models.MediaGraphImageFormat + """ + + _attribute_map = { + 'scale': {'key': 'scale', 'type': 'MediaGraphImageScale'}, + 'format': {'key': 'format', 'type': 'MediaGraphImageFormat'}, + } + + def __init__( + self, + *, + scale: Optional["MediaGraphImageScale"] = None, + format: Optional["MediaGraphImageFormat"] = None, + **kwargs + ): + super(MediaGraphImage, self).__init__(**kwargs) + self.scale = scale + self.format = format + + +class MediaGraphImageFormat(msrest.serialization.Model): + """Encoding settings for an image. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphImageFormatEncoded, MediaGraphImageFormatRaw. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. 
+ :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphImageFormatEncoded': 'MediaGraphImageFormatEncoded', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} + } + + def __init__( + self, + **kwargs + ): + super(MediaGraphImageFormat, self).__init__(**kwargs) + self.type = None # type: Optional[str] + + +class MediaGraphImageFormatEncoded(MediaGraphImageFormat): + """Allowed formats for the image. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param encoding: The different encoding formats that can be used for the image. Possible values + include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". + :type encoding: str or ~azure.media.lva.edge.models.MediaGraphImageEncodingFormat + :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best + quality). + :type quality: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'encoding': {'key': 'encoding', 'type': 'str'}, + 'quality': {'key': 'quality', 'type': 'str'}, + } + + def __init__( + self, + *, + encoding: Optional[Union[str, "MediaGraphImageEncodingFormat"]] = "Jpeg", + quality: Optional[str] = None, + **kwargs + ): + super(MediaGraphImageFormatEncoded, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatEncoded' # type: str + self.encoding = encoding + self.quality = quality + + +class MediaGraphImageFormatRaw(MediaGraphImageFormat): + """Encoding settings for raw images. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", + "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". + :type pixel_format: str or ~azure.media.lva.edge.models.MediaGraphImageFormatRawPixelFormat + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'pixel_format': {'key': 'pixelFormat', 'type': 'str'}, + } + + def __init__( + self, + *, + pixel_format: Optional[Union[str, "MediaGraphImageFormatRawPixelFormat"]] = None, + **kwargs + ): + super(MediaGraphImageFormatRaw, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphImageFormatRaw' # type: str + self.pixel_format = pixel_format + + +class MediaGraphImageScale(msrest.serialization.Model): + """The scaling mode for the image. + + :param mode: Describes the modes for scaling an input video frame into an image, before it is + sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". + :type mode: str or ~azure.media.lva.edge.models.MediaGraphImageScaleMode + :param width: The desired output width of the image. + :type width: str + :param height: The desired output height of the image. 
+ :type height: str + """ + + _attribute_map = { + 'mode': {'key': 'mode', 'type': 'str'}, + 'width': {'key': 'width', 'type': 'str'}, + 'height': {'key': 'height', 'type': 'str'}, + } + + def __init__( + self, + *, + mode: Optional[Union[str, "MediaGraphImageScaleMode"]] = None, + width: Optional[str] = None, + height: Optional[str] = None, + **kwargs + ): + super(MediaGraphImageScale, self).__init__(**kwargs) + self.mode = mode + self.width = width + self.height = height + + +class MediaGraphInstance(msrest.serialization.Model): + """Represents a Media Graph instance. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Properties of a Media Graph instance. + :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, + } + + def __init__( + self, + *, + name: str, + system_data: Optional["MediaGraphSystemData"] = None, + properties: Optional["MediaGraphInstanceProperties"] = None, + **kwargs + ): + super(MediaGraphInstance, self).__init__(**kwargs) + self.name = name + self.system_data = system_data + self.properties = properties + + +class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): + """MediaGraphInstanceActivateRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphInstanceActivateRequest, self).__init__(name=name, **kwargs) + self.method_name = 'GraphInstanceActivate' # type: str + + +class MediaGraphInstanceCollection(msrest.serialization.Model): + """Collection of graph instances. + + :param value: Collection of graph instances. + :type value: list[~azure.media.lva.edge.models.MediaGraphInstance] + :param continuation_token: Continuation token to use in subsequent calls to enumerate through + the graph instance collection (when the collection contains too many results to return in one + response). 
+ :type continuation_token: str + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[MediaGraphInstance]'}, + 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, + } + + def __init__( + self, + *, + value: Optional[List["MediaGraphInstance"]] = None, + continuation_token: Optional[str] = None, + **kwargs + ): + super(MediaGraphInstanceCollection, self).__init__(**kwargs) + self.value = value + self.continuation_token = continuation_token + + +class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): + """MediaGraphInstanceDeActivateRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphInstanceDeActivateRequest, self).__init__(name=name, **kwargs) + self.method_name = 'GraphInstanceDeactivate' # type: str + + +class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): + """MediaGraphInstanceDeleteRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphInstanceDeleteRequest, self).__init__(name=name, **kwargs) + self.method_name = 'GraphInstanceDelete' # type: str + + +class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): + """MediaGraphInstanceGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. 
+ :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphInstanceGetRequest, self).__init__(name=name, **kwargs) + self.method_name = 'GraphInstanceGet' # type: str + + +class MediaGraphInstanceListRequest(OperationBase): + """MediaGraphInstanceListRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphInstanceListRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceList' # type: str + + +class MediaGraphInstanceProperties(msrest.serialization.Model): + """Properties of a Media Graph instance. + + :param description: An optional description for the instance. + :type description: str + :param topology_name: The name of the graph topology that this instance will run. A topology + with this name should already have been set in the Edge module. + :type topology_name: str + :param parameters: List of one or more graph instance parameters. + :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDefinition] + :param state: Allowed states for a graph Instance. Possible values include: "Inactive", + "Activating", "Active", "Deactivating". + :type state: str or ~azure.media.lva.edge.models.MediaGraphInstanceState + """ + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + 'topology_name': {'key': 'topologyName', 'type': 'str'}, + 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDefinition]'}, + 'state': {'key': 'state', 'type': 'str'}, + } + + def __init__( + self, + *, + description: Optional[str] = None, + topology_name: Optional[str] = None, + parameters: Optional[List["MediaGraphParameterDefinition"]] = None, + state: Optional[Union[str, "MediaGraphInstanceState"]] = None, + **kwargs + ): + super(MediaGraphInstanceProperties, self).__init__(**kwargs) + self.description = description + self.topology_name = topology_name + self.parameters = parameters + self.state = state + + +class MediaGraphInstanceSetRequest(OperationBase): + """MediaGraphInstanceSetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param instance: Required. Represents a Media Graph instance. 
+ :type instance: ~azure.media.lva.edge.models.MediaGraphInstance + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'instance': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'instance': {'key': 'instance', 'type': 'MediaGraphInstance'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + instance: "MediaGraphInstance", + **kwargs + ): + super(MediaGraphInstanceSetRequest, self).__init__(**kwargs) + self.method_name = 'GraphInstanceSet' # type: str + self.instance = instance + + +class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): + """MediaGraphInstanceSetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Properties of a Media Graph instance. + :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + system_data: Optional["MediaGraphSystemData"] = None, + properties: Optional["MediaGraphInstanceProperties"] = None, + **kwargs + ): + super(MediaGraphInstanceSetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, **kwargs) + self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str + self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str + self.name = name + self.system_data = system_data + self.properties = properties + + +class MediaGraphIoTHubMessageSink(MediaGraphSink): + """Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. Name to be used for the media graph sink. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this sink node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param hub_output_name: Name of the output path to which the graph will publish message. These + messages can then be delivered to desired destinations by declaring routes referencing the + output path in the IoT Edge deployment manifest. 
+ :type hub_output_name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'hub_output_name': {'key': 'hubOutputName', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + hub_output_name: Optional[str] = None, + **kwargs + ): + super(MediaGraphIoTHubMessageSink, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSink' # type: str + self.hub_output_name = hub_output_name + + +class MediaGraphSource(msrest.serialization.Model): + """Media graph source. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: MediaGraphIoTHubMessageSource, MediaGraphRtspSource. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. + :type name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'#Microsoft.Media.MediaGraphIoTHubMessageSource': 'MediaGraphIoTHubMessageSource', '#Microsoft.Media.MediaGraphRtspSource': 'MediaGraphRtspSource'} + } + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphSource, self).__init__(**kwargs) + self.type = None # type: Optional[str] + self.name = name + + +class MediaGraphIoTHubMessageSource(MediaGraphSource): + """Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. + :type name: str + :param hub_input_name: Name of the input path where messages can be routed to (via routes + declared in the IoT Edge deployment manifest). + :type hub_input_name: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'hub_input_name': {'key': 'hubInputName', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + hub_input_name: Optional[str] = None, + **kwargs + ): + super(MediaGraphIoTHubMessageSource, self).__init__(name=name, **kwargs) + self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSource' # type: str + self.hub_input_name = hub_input_name + + +class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): + """A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. 
+ :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param sensitivity: Enumeration that specifies the sensitivity of the motion detection + processor. Possible values include: "Low", "Medium", "High". + :type sensitivity: str or ~azure.media.lva.edge.models.MediaGraphMotionDetectionSensitivity + :param output_motion_region: Indicates whether the processor should detect and output the + regions, within the video frame, where motion was detected. Default is true. + :type output_motion_region: bool + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'sensitivity': {'key': 'sensitivity', 'type': 'str'}, + 'output_motion_region': {'key': 'outputMotionRegion', 'type': 'bool'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + sensitivity: Optional[Union[str, "MediaGraphMotionDetectionSensitivity"]] = None, + output_motion_region: Optional[bool] = None, + **kwargs + ): + super(MediaGraphMotionDetectionProcessor, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphMotionDetectionProcessor' # type: str + self.sensitivity = sensitivity + self.output_motion_region = output_motion_region + + +class MediaGraphNodeInput(msrest.serialization.Model): + """Represents the input to any node in a media graph. + + :param node_name: The name of another node in the media graph, the output of which is used as + input to this node. + :type node_name: str + :param output_selectors: Allows for the selection of particular streams from another node. + :type output_selectors: list[~azure.media.lva.edge.models.MediaGraphOutputSelector] + """ + + _attribute_map = { + 'node_name': {'key': 'nodeName', 'type': 'str'}, + 'output_selectors': {'key': 'outputSelectors', 'type': '[MediaGraphOutputSelector]'}, + } + + def __init__( + self, + *, + node_name: Optional[str] = None, + output_selectors: Optional[List["MediaGraphOutputSelector"]] = None, + **kwargs + ): + super(MediaGraphNodeInput, self).__init__(**kwargs) + self.node_name = node_name + self.output_selectors = output_selectors + + +class MediaGraphOutputSelector(msrest.serialization.Model): + """Allows for the selection of particular streams from another node. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar property: The stream property to compare with. Default value: "mediaType". + :vartype property: str + :param operator: The operator to compare streams by. Possible values include: "is", "isNot". + :type operator: str or ~azure.media.lva.edge.models.MediaGraphOutputSelectorOperator + :param value: Value to compare against. 
+ :type value: str + """ + + _validation = { + 'property': {'constant': True}, + } + + _attribute_map = { + 'property': {'key': 'property', 'type': 'str'}, + 'operator': {'key': 'operator', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + property = "mediaType" + + def __init__( + self, + *, + operator: Optional[Union[str, "MediaGraphOutputSelectorOperator"]] = None, + value: Optional[str] = None, + **kwargs + ): + super(MediaGraphOutputSelector, self).__init__(**kwargs) + self.operator = operator + self.value = value + + +class MediaGraphParameterDeclaration(msrest.serialization.Model): + """The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the parameter. + :type name: str + :param type: Required. name. Possible values include: "String", "SecretString", "Int", + "Double", "Bool". + :type type: str or ~azure.media.lva.edge.models.MediaGraphParameterType + :param description: Description of the parameter. + :type description: str + :param default: The default value for the parameter, to be used if the graph instance does not + specify a value. + :type default: str + """ + + _validation = { + 'name': {'required': True, 'max_length': 64, 'min_length': 0}, + 'type': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'default': {'key': 'default', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + type: Union[str, "MediaGraphParameterType"], + description: Optional[str] = None, + default: Optional[str] = None, + **kwargs + ): + super(MediaGraphParameterDeclaration, self).__init__(**kwargs) + self.name = name + self.type = type + self.description = description + self.default = default + + +class MediaGraphParameterDefinition(msrest.serialization.Model): + """A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. Name of parameter as defined in the graph topology. + :type name: str + :param value: Required. Value of parameter. + :type value: str + """ + + _validation = { + 'name': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'value': {'key': 'value', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + value: str, + **kwargs + ): + super(MediaGraphParameterDefinition, self).__init__(**kwargs) + self.name = name + self.value = value + + +class MediaGraphPemCertificateList(MediaGraphCertificateSource): + """A list of PEM formatted certificates. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param certificates: Required. PEM formatted public certificates one per entry. 
+ :type certificates: list[str] + """ + + _validation = { + 'type': {'required': True}, + 'certificates': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'certificates': {'key': 'certificates', 'type': '[str]'}, + } + + def __init__( + self, + *, + certificates: List[str], + **kwargs + ): + super(MediaGraphPemCertificateList, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphPemCertificateList' # type: str + self.certificates = certificates + + +class MediaGraphRtspSource(MediaGraphSource): + """Enables a graph to capture media from a RTSP server. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The type of the source node. The discriminator for derived + types.Constant filled by server. + :type type: str + :param name: Required. The name to be used for this source node. + :type name: str + :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. + Possible values include: "Http", "Tcp". + :type transport: str or ~azure.media.lva.edge.models.MediaGraphRtspTransport + :param endpoint: Required. RTSP endpoint of the stream that is being connected to. + :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'endpoint': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'transport': {'key': 'transport', 'type': 'str'}, + 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, + } + + def __init__( + self, + *, + name: str, + endpoint: "MediaGraphEndpoint", + transport: Optional[Union[str, "MediaGraphRtspTransport"]] = None, + **kwargs + ): + super(MediaGraphRtspSource, self).__init__(name=name, **kwargs) + self.type = '#Microsoft.Media.MediaGraphRtspSource' # type: str + self.transport = transport + self.endpoint = endpoint + + +class MediaGraphSignalGateProcessor(MediaGraphProcessor): + """A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param name: Required. The name for this processor node. + :type name: str + :param inputs: Required. An array of the names of the other nodes in the media graph, the + outputs of which are used as input for this processor node. + :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] + :param activation_evaluation_window: The period of time over which the gate gathers input + events, before evaluating them. + :type activation_evaluation_window: str + :param activation_signal_offset: Signal offset once the gate is activated (can be negative). It + is an offset between the time the event is received, and the timestamp of the first media + sample (eg. video frame) that is allowed through by the gate. + :type activation_signal_offset: str + :param minimum_activation_time: The minimum period for which the gate remains open, in the + absence of subsequent triggers (events). + :type minimum_activation_time: str + :param maximum_activation_time: The maximum period for which the gate remains open, in the + presence of subsequent events. 
+ :type maximum_activation_time: str + """ + + _validation = { + 'type': {'required': True}, + 'name': {'required': True}, + 'inputs': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, + 'activation_evaluation_window': {'key': 'activationEvaluationWindow', 'type': 'str'}, + 'activation_signal_offset': {'key': 'activationSignalOffset', 'type': 'str'}, + 'minimum_activation_time': {'key': 'minimumActivationTime', 'type': 'str'}, + 'maximum_activation_time': {'key': 'maximumActivationTime', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + inputs: List["MediaGraphNodeInput"], + activation_evaluation_window: Optional[str] = None, + activation_signal_offset: Optional[str] = None, + minimum_activation_time: Optional[str] = None, + maximum_activation_time: Optional[str] = None, + **kwargs + ): + super(MediaGraphSignalGateProcessor, self).__init__(name=name, inputs=inputs, **kwargs) + self.type = '#Microsoft.Media.MediaGraphSignalGateProcessor' # type: str + self.activation_evaluation_window = activation_evaluation_window + self.activation_signal_offset = activation_signal_offset + self.minimum_activation_time = minimum_activation_time + self.maximum_activation_time = maximum_activation_time + + +class MediaGraphSystemData(msrest.serialization.Model): + """Graph system data. + + :param created_at: The timestamp of resource creation (UTC). + :type created_at: ~datetime.datetime + :param last_modified_at: The timestamp of resource last modification (UTC). + :type last_modified_at: ~datetime.datetime + """ + + _attribute_map = { + 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, + 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, + } + + def __init__( + self, + *, + created_at: Optional[datetime.datetime] = None, + last_modified_at: Optional[datetime.datetime] = None, + **kwargs + ): + super(MediaGraphSystemData, self).__init__(**kwargs) + self.created_at = created_at + self.last_modified_at = last_modified_at + + +class MediaGraphTlsEndpoint(MediaGraphEndpoint): + """An endpoint that the graph can connect to, which must be connected over TLS/SSL. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. + :type url: str + :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null + designates that Azure Media Service's source of trust should be used. + :type trusted_certificates: ~azure.media.lva.edge.models.MediaGraphCertificateSource + :param validation_options: Validation options to use when authenticating a TLS connection. By + default, strict validation is used. 
+ :type validation_options: ~azure.media.lva.edge.models.MediaGraphTlsValidationOptions + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'MediaGraphCertificateSource'}, + 'validation_options': {'key': 'validationOptions', 'type': 'MediaGraphTlsValidationOptions'}, + } + + def __init__( + self, + *, + url: str, + credentials: Optional["MediaGraphCredentials"] = None, + trusted_certificates: Optional["MediaGraphCertificateSource"] = None, + validation_options: Optional["MediaGraphTlsValidationOptions"] = None, + **kwargs + ): + super(MediaGraphTlsEndpoint, self).__init__(credentials=credentials, url=url, **kwargs) + self.type = '#Microsoft.Media.MediaGraphTlsEndpoint' # type: str + self.trusted_certificates = trusted_certificates + self.validation_options = validation_options + + +class MediaGraphTlsValidationOptions(msrest.serialization.Model): + """Options for controlling the authentication of TLS endpoints. + + :param ignore_hostname: Boolean value ignoring the host name (common name) during validation. + :type ignore_hostname: str + :param ignore_signature: Boolean value ignoring the integrity of the certificate chain at the + current time. + :type ignore_signature: str + """ + + _attribute_map = { + 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, + 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, + } + + def __init__( + self, + *, + ignore_hostname: Optional[str] = None, + ignore_signature: Optional[str] = None, + **kwargs + ): + super(MediaGraphTlsValidationOptions, self).__init__(**kwargs) + self.ignore_hostname = ignore_hostname + self.ignore_signature = ignore_signature + + +class MediaGraphTopology(msrest.serialization.Model): + """Describes a graph topology. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Describes the properties of a graph topology. + :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, + } + + def __init__( + self, + *, + name: str, + system_data: Optional["MediaGraphSystemData"] = None, + properties: Optional["MediaGraphTopologyProperties"] = None, + **kwargs + ): + super(MediaGraphTopology, self).__init__(**kwargs) + self.name = name + self.system_data = system_data + self.properties = properties + + +class MediaGraphTopologyCollection(msrest.serialization.Model): + """Collection of graph topologies. + + :param value: Collection of graph topologies. + :type value: list[~azure.media.lva.edge.models.MediaGraphTopology] + :param continuation_token: Continuation token to use in subsequent calls to enumerate through + the graph topologies collection (when the collection contains too many results to return in one + response). 
+ :type continuation_token: str + """ + + _attribute_map = { + 'value': {'key': 'value', 'type': '[MediaGraphTopology]'}, + 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, + } + + def __init__( + self, + *, + value: Optional[List["MediaGraphTopology"]] = None, + continuation_token: Optional[str] = None, + **kwargs + ): + super(MediaGraphTopologyCollection, self).__init__(**kwargs) + self.value = value + self.continuation_token = continuation_token + + +class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): + """MediaGraphTopologyDeleteRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphTopologyDeleteRequest, self).__init__(name=name, **kwargs) + self.method_name = 'GraphTopologyDelete' # type: str + + +class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): + """MediaGraphTopologyGetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. method name. + :type name: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(MediaGraphTopologyGetRequest, self).__init__(name=name, **kwargs) + self.method_name = 'GraphTopologyGet' # type: str + + +class MediaGraphTopologyListRequest(OperationBase): + """MediaGraphTopologyListRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + } + + api_version = "1.0" + + def __init__( + self, + **kwargs + ): + super(MediaGraphTopologyListRequest, self).__init__(**kwargs) + self.method_name = 'GraphTopologyList' # type: str + + +class MediaGraphTopologyProperties(msrest.serialization.Model): + """Describes the properties of a graph topology. + + :param description: An optional description for the instance. 
+ :type description: str + :param parameters: An optional description for the instance. + :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDeclaration] + :param sources: An optional description for the instance. + :type sources: list[~azure.media.lva.edge.models.MediaGraphSource] + :param processors: An optional description for the instance. + :type processors: list[~azure.media.lva.edge.models.MediaGraphProcessor] + :param sinks: name. + :type sinks: list[~azure.media.lva.edge.models.MediaGraphSink] + """ + + _attribute_map = { + 'description': {'key': 'description', 'type': 'str'}, + 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDeclaration]'}, + 'sources': {'key': 'sources', 'type': '[MediaGraphSource]'}, + 'processors': {'key': 'processors', 'type': '[MediaGraphProcessor]'}, + 'sinks': {'key': 'sinks', 'type': '[MediaGraphSink]'}, + } + + def __init__( + self, + *, + description: Optional[str] = None, + parameters: Optional[List["MediaGraphParameterDeclaration"]] = None, + sources: Optional[List["MediaGraphSource"]] = None, + processors: Optional[List["MediaGraphProcessor"]] = None, + sinks: Optional[List["MediaGraphSink"]] = None, + **kwargs + ): + super(MediaGraphTopologyProperties, self).__init__(**kwargs) + self.description = description + self.parameters = parameters + self.sources = sources + self.processors = processors + self.sinks = sinks + + +class MediaGraphTopologySetRequest(OperationBase): + """MediaGraphTopologySetRequest. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param graph: Required. Describes a graph topology. + :type graph: ~azure.media.lva.edge.models.MediaGraphTopology + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'graph': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'graph': {'key': 'graph', 'type': 'MediaGraphTopology'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + graph: "MediaGraphTopology", + **kwargs + ): + super(MediaGraphTopologySetRequest, self).__init__(**kwargs) + self.method_name = 'GraphTopologySet' # type: str + self.graph = graph + + +class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): + """MediaGraphTopologySetRequestBody. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar method_name: method name.Constant filled by server. + :vartype method_name: str + :ivar api_version: api version. Default value: "1.0". + :vartype api_version: str + :param name: Required. name. + :type name: str + :param system_data: Graph system data. + :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData + :param properties: Describes the properties of a graph topology. 
+ :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties + """ + + _validation = { + 'method_name': {'readonly': True}, + 'api_version': {'constant': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'method_name': {'key': 'methodName', 'type': 'str'}, + 'api_version': {'key': '@apiVersion', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, + 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, + } + + api_version = "1.0" + + def __init__( + self, + *, + name: str, + system_data: Optional["MediaGraphSystemData"] = None, + properties: Optional["MediaGraphTopologyProperties"] = None, + **kwargs + ): + super(MediaGraphTopologySetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, **kwargs) + self.method_name = 'MediaGraphTopologySetRequestBody' # type: str + self.method_name = 'MediaGraphTopologySetRequestBody' # type: str + self.name = name + self.system_data = system_data + self.properties = properties + + +class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint): + """An endpoint that the media graph can connect to, with no encryption in transit. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param credentials: Polymorphic credentials to be presented to the endpoint. + :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials + :param url: Required. Url for the endpoint. + :type url: str + """ + + _validation = { + 'type': {'required': True}, + 'url': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, + 'url': {'key': 'url', 'type': 'str'}, + } + + def __init__( + self, + *, + url: str, + credentials: Optional["MediaGraphCredentials"] = None, + **kwargs + ): + super(MediaGraphUnsecuredEndpoint, self).__init__(credentials=credentials, url=url, **kwargs) + self.type = '#Microsoft.Media.MediaGraphUnsecuredEndpoint' # type: str + + +class MediaGraphUsernamePasswordCredentials(MediaGraphCredentials): + """Username/password credential pair. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. The discriminator for derived types.Constant filled by server. + :type type: str + :param username: Required. Username for a username/password pair. + :type username: str + :param password: Password for a username/password pair. 
+ :type password: str + """ + + _validation = { + 'type': {'required': True}, + 'username': {'required': True}, + } + + _attribute_map = { + 'type': {'key': '@type', 'type': 'str'}, + 'username': {'key': 'username', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + } + + def __init__( + self, + *, + username: str, + password: Optional[str] = None, + **kwargs + ): + super(MediaGraphUsernamePasswordCredentials, self).__init__(**kwargs) + self.type = '#Microsoft.Media.MediaGraphUsernamePasswordCredentials' # type: str + self.username = username + self.password = password diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/py.typed b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/py.typed new file mode 100644 index 000000000000..e5aff4f83af8 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_version.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_version.py new file mode 100644 index 000000000000..f95f18986f48 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_version.py @@ -0,0 +1,7 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------- + +VERSION = '0.0.1' diff --git a/sdk/media/azure-media-lva-edge/dev_requirements.txt b/sdk/media/azure-media-lva-edge/dev_requirements.txt new file mode 100644 index 000000000000..08bcfb306787 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/dev_requirements.txt @@ -0,0 +1,11 @@ +../../core/azure-core +-e ../../../tools/azure-devtools +-e ../../../tools/azure-sdk-tools +-e ../../identity/azure-identity +aiohttp>=3.0; python_version >= '3.5' +aiodns>=2.0; python_version >= '3.5' +msrest>=0.6.10 +pytest==5.4.2 +tox>=3.20.0 +tox-monorepo>=0.1.2 +pytest-asyncio==0.12.0 diff --git a/sdk/media/azure-media-lva-edge/samples/sample_conditional_async.py b/sdk/media/azure-media-lva-edge/samples/sample_conditional_async.py new file mode 100644 index 000000000000..c894b9b71a09 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/samples/sample_conditional_async.py @@ -0,0 +1,48 @@ +# coding: utf-8 + +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import asyncio +import os +from colorama import init, Style, Fore +init() + +from azure.identity.aio import DefaultAzureCredential +from azure.learnappconfig.aio import AppConfigurationClient +from azure.core.exceptions import ResourceNotFoundError, ResourceNotModifiedError +from azure.core import MatchConditions + + +async def main(): + url = os.environ.get('API-LEARN_ENDPOINT') + credential = DefaultAzureCredential() + async with AppConfigurationClient(account_url=url, credential=credential) as client: + + # Retrieve initial color value + try: + first_color = await client.get_configuration_setting(os.environ['API-LEARN_SETTING_COLOR_KEY']) + except ResourceNotFoundError: + raise + + # Get latest color value, only if it has changed + try: + new_color = await client.get_configuration_setting( + key=os.environ['API-LEARN_SETTING_COLOR_KEY'], + match_condition=MatchConditions.IfModified, + etag=first_color.etag + ) + except ResourceNotModifiedError: + new_color = first_color + + color = getattr(Fore, new_color.value.upper()) + greeting = 'Hello!' + print(f'{color}{greeting}{Style.RESET_ALL}') + + +if __name__ == "__main__": + loop = asyncio.get_event_loop() + loop.run_until_complete(main()) diff --git a/sdk/media/azure-media-lva-edge/samples/sample_hello_world.py b/sdk/media/azure-media-lva-edge/samples/sample_hello_world.py new file mode 100644 index 000000000000..f6fa6e0686fd --- /dev/null +++ b/sdk/media/azure-media-lva-edge/samples/sample_hello_world.py @@ -0,0 +1,35 @@ +# coding: utf-8 + +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+
+import os
+from colorama import init, Style, Fore
+init()
+
+from azure.identity import DefaultAzureCredential
+from azure.learnappconfig import AppConfigurationClient
+
+def main():
+    url = os.environ.get('API-LEARN_ENDPOINT')
+    credential = DefaultAzureCredential()
+    client = AppConfigurationClient(account_url=url, credential=credential)
+
+    try:
+        color_setting = client.get_configuration_setting(os.environ['API-LEARN_SETTING_COLOR_KEY'])
+        color = color_setting.value.upper()
+        text_setting = client.get_configuration_setting(os.environ['API-LEARN_SETTING_TEXT_KEY'])
+        greeting = text_setting.value
+    except Exception:
+        color = 'RED'
+        greeting = 'Default greeting'
+
+    color = getattr(Fore, color)
+    print(f'{color}{greeting}{Style.RESET_ALL}')
+
+
+if __name__ == "__main__":
+    main()
diff --git a/sdk/media/azure-media-lva-edge/samples/sample_lva.py b/sdk/media/azure-media-lva-edge/samples/sample_lva.py
new file mode 100644
index 000000000000..9ac9ca9a817a
--- /dev/null
+++ b/sdk/media/azure-media-lva-edge/samples/sample_lva.py
@@ -0,0 +1,83 @@
+
+import json
+import os
+from azure.media.lva.edge._generated.models import *
+from azure.iot.hub import IoTHubRegistryManager
+from azure.iot.hub.models import CloudToDeviceMethod, CloudToDeviceMethodResult
+from datetime import time
+
+device_id = "lva-sample-device"
+module_d = "lvaEdge"
+connection_string = os.environ["IOTHUB_CONNECTION_STRING"]  # illustrative variable name; never hard-code IoT Hub credentials in samples
+graph_instance_name = "graphInstance1"
+graph_topology_name = "graphTopology1"
+
+
+def build_graph_topology():
+    graph_properties = MediaGraphTopologyProperties()
+    graph_properties.description = "Continuous video recording to an Azure Media Services Asset"
+    user_name_param = MediaGraphParameterDeclaration(name="rtspUserName",type="String",default="dummyusername")
+    password_param = MediaGraphParameterDeclaration(name="rtspPassword",type="String",default="dummypassword")
+    url_param = MediaGraphParameterDeclaration(name="rtspUrl",type="String",default="rtsp://www.sample.com")
+
+    source = MediaGraphRtspSource(name="rtspSource", endpoint=MediaGraphUnsecuredEndpoint(url="${rtspUrl}",credentials=MediaGraphUsernamePasswordCredentials(username="${rtspUserName}",password="${rtspPassword}")))
+    node = MediaGraphNodeInput(node_name="rtspSource")
+    sink = MediaGraphAssetSink(name="assetsink", inputs=[node],asset_name_pattern='sampleAsset-${System.GraphTopologyName}-${System.GraphInstanceName}', segment_length="PT0H0M30S",local_media_cache_maximum_size_mi_b=2048,local_media_cache_path="/var/lib/azuremediaservices/tmp/")
+    graph_properties.parameters = [user_name_param, password_param, url_param]
+    graph_properties.sources = [source]
+    graph_properties.sinks = [sink]
+    graph = MediaGraphTopology(name=graph_topology_name,properties=graph_properties)
+
+    return graph
+
+def build_graph_instance():
+    url_param = MediaGraphParameterDefinition(name="rtspUrl", value="rtsp://rtspsim:554/media/camera-300s.mkv")
+    graph_instance_properties = MediaGraphInstanceProperties(description="Sample graph description", topology_name=graph_topology_name, parameters=[url_param])
+
+    graph_instance = MediaGraphInstance(name=graph_instance_name, properties=graph_instance_properties)
+
+    return graph_instance
+
+def invoke_method(method):
+    direct_method = CloudToDeviceMethod(method_name=method.method_name, payload=method.serialize())
+    registry_manager = 
IoTHubRegistryManager(connection_string) + + return registry_manager.invoke_device_module_method(device_id, module_d, direct_method) + +def main(): + graph_topology = build_graph_topology() + graph_instance = build_graph_instance() + + try: + set_graph = invoke_method(MediaGraphTopologySetRequest(graph=graph_topology)) + set_graph_result = MediaGraphTopology.deserialize(set_graph) + + list_graph = invoke_method(MediaGraphTopologyListRequest()) + list_graph_result = MediaGraphTopology.deserialize(list_graph) + + get_graph = invoke_method(MediaGraphTopologyGetRequest(name=graph_topology_name)) + get_graph_result = MediaGraphTopology.deserialize(get_graph) + + set_graph_instance = invoke_method(MediaGraphInstanceSetRequest(instance=graph_instance)) + set_graph_instance_result = MediaGraphInstance.deserialize(set_graph_instance) + + activate_graph_instance = invoke_method(MediaGraphInstanceActivateRequest(name=graph_instance_name)) + activate_graph_instance_result = MediaGraphInstance.deserialize(activate_graph_instance) + + get_graph_instance = invoke_method(MediaGraphInstanceGetRequest(name=graph_instance_name)) + get_graph_instance_result = MediaGraphInstance.deserialize(get_graph_instance) + + deactivate_graph_instance = invoke_method(MediaGraphInstanceDeActivateRequest(name=graph_instance_name)) + deactivate_graph_instance_result = MediaGraphInstance.deserialize(deactivate_graph_instance) + + delete_graph_instance = invoke_method(MediaGraphInstanceDeleteRequest(name=graph_instance_name)) + delete_graph_instance_result = MediaGraphInstance.deserialize(delete_graph_instance) + + delete_graph = invoke_method(MediaGraphTopologyDeleteRequest(name=graph_topology_name)) + delete_graph_result = MediaGraphTopology.deserialize(delete_graph) + + except Exception as ex: + print(ex) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/sdk_packaging.toml b/sdk/media/azure-media-lva-edge/sdk_packaging.toml new file mode 100644 index 000000000000..b366f78fb41b --- /dev/null +++ b/sdk/media/azure-media-lva-edge/sdk_packaging.toml @@ -0,0 +1,4 @@ +[packaging] +is_arm = false +need_msrestazure = false +auto_update = false diff --git a/sdk/media/azure-media-lva-edge/setup.cfg b/sdk/media/azure-media-lva-edge/setup.cfg new file mode 100644 index 000000000000..3c6e79cf31da --- /dev/null +++ b/sdk/media/azure-media-lva-edge/setup.cfg @@ -0,0 +1,2 @@ +[bdist_wheel] +universal=1 diff --git a/sdk/media/azure-media-lva-edge/setup.py b/sdk/media/azure-media-lva-edge/setup.py new file mode 100644 index 000000000000..d4a8c12edcc6 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/setup.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python + +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
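An aside for reviewers: `invoke_device_module_method` returns a `CloudToDeviceMethodResult`, yet the sample passes that raw result straight to `deserialize`. A slightly more defensive helper would check the returned status and deserialize only the payload. The sketch below is illustrative, not part of the sample (the helper name and the 400 threshold are assumptions); it reuses `device_id`, `module_id` and `connection_string` from sample_lva.py:

```python
# Sketch only: check the direct-method response before deserializing it.
# Assumes CloudToDeviceMethodResult exposes `status` and `payload`.
def invoke_and_parse(method, result_type):
    direct_method = CloudToDeviceMethod(method_name=method.method_name, payload=method.serialize())
    registry_manager = IoTHubRegistryManager(connection_string)
    result = registry_manager.invoke_device_module_method(device_id, module_id, direct_method)
    if result.status >= 400:
        raise RuntimeError("{} failed with status {}".format(method.method_name, result.status))
    return result_type.deserialize(result.payload)

# usage:
# topology = invoke_and_parse(MediaGraphTopologyGetRequest(name=graph_topology_name), MediaGraphTopology)
```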
diff --git a/sdk/media/azure-media-lva-edge/sdk_packaging.toml b/sdk/media/azure-media-lva-edge/sdk_packaging.toml
new file mode 100644
index 000000000000..b366f78fb41b
--- /dev/null
+++ b/sdk/media/azure-media-lva-edge/sdk_packaging.toml
@@ -0,0 +1,4 @@
+[packaging]
+is_arm = false
+need_msrestazure = false
+auto_update = false
diff --git a/sdk/media/azure-media-lva-edge/setup.cfg b/sdk/media/azure-media-lva-edge/setup.cfg
new file mode 100644
index 000000000000..3c6e79cf31da
--- /dev/null
+++ b/sdk/media/azure-media-lva-edge/setup.cfg
@@ -0,0 +1,2 @@
+[bdist_wheel]
+universal=1
diff --git a/sdk/media/azure-media-lva-edge/setup.py b/sdk/media/azure-media-lva-edge/setup.py
new file mode 100644
index 000000000000..d4a8c12edcc6
--- /dev/null
+++ b/sdk/media/azure-media-lva-edge/setup.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+
+#-------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#--------------------------------------------------------------------------
+
+import sys
+import re
+import os.path
+from io import open
+from setuptools import find_packages, setup
+
+# Change the PACKAGE_NAME only to change folder and different name
+PACKAGE_NAME = "azure-media-lva-edge"
+PACKAGE_PPRINT_NAME = "Azure Media Live Video Analytics Edge SDK"
+
+# a-b-c => a/b/c
+package_folder_path = PACKAGE_NAME.replace('-', '/')
+# a-b-c => a.b.c
+namespace_name = PACKAGE_NAME.replace('-', '.')
+
+# azure v0.x is not compatible with this package
+# azure v0.x used to have a __version__ attribute (newer versions don't)
+try:
+    import azure
+    try:
+        ver = azure.__version__
+        raise Exception(
+            'This package is incompatible with azure=={}. '.format(ver) +
+            'Uninstall it with "pip uninstall azure".'
+        )
+    except AttributeError:
+        pass
+except ImportError:
+    pass
+
+# Version extraction inspired from 'requests'
+with open(os.path.join(package_folder_path, '_version.py'), 'r') as fd:
+    version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
+                        fd.read(), re.MULTILINE).group(1)
+
+if not version:
+    raise RuntimeError('Cannot find version information')
+
+with open('README.md', encoding='utf-8') as f:
+    readme = f.read()
+with open('CHANGELOG.md', encoding='utf-8') as f:
+    changelog = f.read()
+
+exclude_packages = [
+    'tests',
+    'tests.*',
+    'samples',
+    # Exclude packages that will be covered by PEP420 or nspkg
+    'azure',
+]
+if sys.version_info < (3, 5, 3):
+    exclude_packages.extend([
+        '*.aio',
+        '*.aio.*'
+    ])
+
+setup(
+    name=PACKAGE_NAME,
+    version=version,
+    description='Microsoft {} Library for Python'.format(PACKAGE_PPRINT_NAME),
+    long_description=readme + '\n\n' + changelog,
+    long_description_content_type='text/markdown',
+    license='MIT License',
+    author='Microsoft Corporation',
+    author_email='azpysdkhelp@microsoft.com',
+    url='https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/media/azure-media-lva-edge',
+    classifiers=[
+        "Development Status :: 4 - Beta",
+        'Programming Language :: Python',
+        'Programming Language :: Python :: 2',
+        'Programming Language :: Python :: 2.7',
+        'Programming Language :: Python :: 3',
+        'Programming Language :: Python :: 3.5',
+        'Programming Language :: Python :: 3.6',
+        'Programming Language :: Python :: 3.7',
+        'Programming Language :: Python :: 3.8',
+        'License :: OSI Approved :: MIT License',
+    ],
+    zip_safe=False,
+    packages=find_packages(exclude=exclude_packages),
+    install_requires=[
+        "msrest>=0.6.10",
+        "azure-core<2.0.0,>=1.0.0",
+    ],
+    extras_require={
+        ":python_version<'3.0'": ['azure-nspkg'],
+        ":python_version<'3.4'": ['enum34>=1.0.4'],
+        ":python_version<'3.5'": ['typing'],
+        "async:python_version>='3.5'": [
+            'aiohttp>=3.0',
+            'aiodns>=2.0'
+        ],
+    }
+)
\ No newline at end of file
diff --git a/sdk/media/azure-media-lva-edge/swagger/README.md b/sdk/media/azure-media-lva-edge/swagger/README.md
new file mode 100644
index 000000000000..7880fc364c91
--- /dev/null
+++ b/sdk/media/azure-media-lva-edge/swagger/README.md
@@ -0,0 +1,26 @@
+# Azure Media Live Video Analytics on IoT Edge for Python
+
+> see https://aka.ms/autorest
+
+
+### Generation
+```ps
+cd 
+autorest --v3 --python README.md
+```
+
+### Settings
+```yaml
+require: C:\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md
+output-folder: ../azure/media/lva/edge/_generated
+namespace: azure.media.lva.edge
+no-namespace-folders: true
+license-header: MICROSOFT_MIT_NO_VERSION
+enable-xml: false
+vanilla: true
+clear-output-folder: true
+add-credentials: false +python: true +package-version: "1.0" +public-clients: false +``` diff --git a/sdk/media/azure-media-lva-edge/swagger/appconfiguration.json b/sdk/media/azure-media-lva-edge/swagger/appconfiguration.json new file mode 100644 index 000000000000..36b206ca6142 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/swagger/appconfiguration.json @@ -0,0 +1,1239 @@ +{ + "swagger": "2.0", + "info": { + "description": "Direct Methods for Live Video Analytics on IoT Edge.", + "version": "1.0.4", + "title": "Direct Methods for Live Video Analytics on IoT Edge", + "contact": { + "email": "amshelp@microsoft.com" + } + }, + "security": [ + { + "sharedAccessSignature": [] + } + ], + "paths": {}, + "securityDefinitions": { + "sharedAccessSignature": { + "type": "apiKey", + "name": "Authorization", + "in": "header" + } + }, + "definitions": { + "OperationBase": { + "type": "object", + "properties": { + "methodName": { + "type": "string", + "description": "method name", + "readOnly": true + }, + "@apiVersion": { + "type": "string", + "description": "api version", + "enum": [ + "1.0" + ], + "x-ms-enum": { + "name": "ApiVersionEnum", + "modelAsString": false + } + } + }, + "discriminator": "methodName" + }, + "MediaGraphTopologySetRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphTopologySet", + "allOf": [ + { + "$ref": "#/definitions/OperationBase" + } + ], + "required": [ + "graph" + ], + "properties": { + "graph": { + "$ref": "#/definitions/MediaGraphTopology" + } + } + }, + "MediaGraphTopologySetRequestBody": { + "type": "object", + "x-ms-discriminator-value": "GraphTopologySet", + "allOf": [ + { + "$ref": "#/definitions/OperationBase" + }, + { + "$ref": "#/definitions/MediaGraphTopology" + } + ] + }, + "MediaGraphInstanceSetRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphInstanceSet", + "allOf": [ + { + "$ref": "#/definitions/OperationBase" + } + ], + "required": [ + "instance" + ], + "properties": { + "instance": { + "$ref": "#/definitions/MediaGraphInstance" + } + } + }, + "ItemNonSetRequestBase": { + "type": "object", + "allOf": [ + { + "$ref": "#/definitions/OperationBase" + } + ], + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "description": "method name" + } + } + }, + "MediaGraphTopologyListRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphTopologyList", + "allOf": [ + { + "$ref": "#/definitions/OperationBase" + } + ] + }, + "MediaGraphTopologyGetRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphTopologyGet", + "allOf": [ + { + "$ref": "#/definitions/ItemNonSetRequestBase" + } + ] + }, + "MediaGraphTopologyDeleteRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphTopologyDelete", + "allOf": [ + { + "$ref": "#/definitions/ItemNonSetRequestBase" + } + ] + }, + "MediaGraphInstanceListRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphInstanceList", + "allOf": [ + { + "$ref": "#/definitions/OperationBase" + } + ] + }, + "MediaGraphInstanceGetRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphInstanceGet", + "allOf": [ + { + "$ref": "#/definitions/ItemNonSetRequestBase" + } + ] + }, + "MediaGraphInstanceActivateRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphInstanceActivate", + "allOf": [ + { + "$ref": "#/definitions/ItemNonSetRequestBase" + } + ] + }, + "MediaGraphInstanceDeActivateRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphInstanceDeactivate", + "allOf": [ + { + "$ref": 
"#/definitions/ItemNonSetRequestBase" + } + ] + }, + "MediaGraphInstanceDeleteRequest": { + "type": "object", + "x-ms-discriminator-value": "GraphInstanceDelete", + "allOf": [ + { + "$ref": "#/definitions/ItemNonSetRequestBase" + } + ] + }, + "MediaGraphInstance": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "description": "name" + }, + "systemData": { + "$ref": "#/definitions/MediaGraphSystemData" + }, + "properties": { + "$ref": "#/definitions/MediaGraphInstanceProperties" + } + }, + "description": "Represents a Media Graph instance." + }, + "MediaGraphInstanceProperties": { + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "An optional description for the instance." + }, + "topologyName": { + "type": "string", + "description": "The name of the graph topology that this instance will run. A topology with this name should already have been set in the Edge module." + }, + "parameters": { + "type": "array", + "description": "List of one or more graph instance parameters.", + "items": { + "$ref": "#/definitions/MediaGraphParameterDefinition" + } + }, + "state": { + "type": "string", + "description": "Allowed states for a graph Instance.", + "enum": [ + "Inactive", + "Activating", + "Active", + "Deactivating" + ], + "x-ms-enum": { + "name": "MediaGraphInstanceState", + "values": [ + { + "value": "Inactive", + "description": "Inactive state." + }, + { + "value": "Activating", + "description": "Activating state." + }, + { + "value": "Active", + "description": "Active state." + }, + { + "value": "Deactivating", + "description": "Deactivating state." + } + ], + "modelAsString": false + } + } + }, + "description": "Properties of a Media Graph instance." + }, + "MediaGraphParameterDefinition": { + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "type": "string", + "description": "Name of parameter as defined in the graph topology." + }, + "value": { + "type": "string", + "description": "Value of parameter." + } + }, + "description": "A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters." + }, + "MediaGraphInstanceCollection": { + "properties": { + "value": { + "type": "array", + "description": "Collection of graph instances.", + "items": { + "$ref": "#/definitions/MediaGraphInstance" + } + }, + "@continuationToken": { + "type": "string", + "description": "Continuation token to use in subsequent calls to enumerate through the graph instance collection (when the collection contains too many results to return in one response)." + } + }, + "description": "Collection of graph instances." + }, + "MediaGraphTopologyCollection": { + "properties": { + "value": { + "type": "array", + "description": "Collection of graph topologies.", + "items": { + "$ref": "#/definitions/MediaGraphTopology" + } + }, + "@continuationToken": { + "type": "string", + "description": "Continuation token to use in subsequent calls to enumerate through the graph topologies collection (when the collection contains too many results to return in one response)." + } + }, + "description": "Collection of graph topologies." 
+ }, + "MediaGraphTopology": { + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string", + "description": "name" + }, + "systemData": { + "$ref": "#/definitions/MediaGraphSystemData" + }, + "properties": { + "$ref": "#/definitions/MediaGraphTopologyProperties" + } + }, + "description": "Describes a graph topology." + }, + "MediaGraphTopologyProperties": { + "type": "object", + "properties": { + "description": { + "type": "string", + "description": "An optional description for the instance." + }, + "parameters": { + "type": "array", + "description": "An optional description for the instance.", + "items": { + "$ref": "#/definitions/MediaGraphParameterDeclaration" + } + }, + "sources": { + "type": "array", + "description": "An optional description for the instance.", + "items": { + "$ref": "#/definitions/MediaGraphSource" + } + }, + "processors": { + "type": "array", + "description": "An optional description for the instance.", + "items": { + "$ref": "#/definitions/MediaGraphProcessor" + } + }, + "sinks": { + "description": "name", + "type": "array", + "items": { + "$ref": "#/definitions/MediaGraphSink" + } + } + }, + "description": "Describes the properties of a graph topology." + }, + "MediaGraphSystemData": { + "type": "object", + "properties": { + "createdAt": { + "type": "string", + "format": "date-time", + "description": "The timestamp of resource creation (UTC)." + }, + "lastModifiedAt": { + "type": "string", + "format": "date-time", + "description": "The timestamp of resource last modification (UTC)." + } + }, + "description": "Graph system data." + }, + "MediaGraphParameterDeclaration": { + "type": "object", + "required": [ + "name", + "type" + ], + "properties": { + "name": { + "type": "string", + "description": "The name of the parameter.", + "maxLength": 64 + }, + "type": { + "type": "string", + "description": "name", + "enum": [ + "String", + "SecretString", + "Int", + "Double", + "Bool" + ], + "x-ms-enum": { + "name": "MediaGraphParameterType", + "values": [ + { + "value": "String", + "description": "A string parameter value." + }, + { + "value": "SecretString", + "description": "A string to hold sensitive information as parameter value." + }, + { + "value": "Int", + "description": "A 32-bit signed integer as parameter value." + }, + { + "value": "Double", + "description": "A 64-bit double-precision floating point type as parameter value." + }, + { + "value": "Bool", + "description": "A boolean value that is either true or false." + } + ], + "modelAsString": false + } + }, + "description": { + "type": "string", + "description": "Description of the parameter." + }, + "default": { + "type": "string", + "description": "The default value for the parameter, to be used if the graph instance does not specify a value." + } + }, + "description": "The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters." + }, + "MediaGraphSource": { + "type": "object", + "required": [ + "@type", + "name" + ], + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The type of the source node. The discriminator for derived types." + }, + "name": { + "type": "string", + "description": "The name to be used for this source node." 
+ } + }, + "description": "Media graph source." + }, + "MediaGraphRtspSource": { + "properties": { + "transport": { + "type": "string", + "description": "Underlying RTSP transport. This is used to enable or disable HTTP tunneling.", + "enum": [ + "Http", + "Tcp" + ], + "x-ms-enum": { + "name": "MediaGraphRtspTransport", + "values": [ + { + "value": "Http", + "description": "HTTP/HTTPS transport. This should be used when HTTP tunneling is desired." + }, + { + "value": "Tcp", + "description": "TCP transport. This should be used when HTTP tunneling is NOT desired." + } + ], + "modelAsString": true + } + }, + "endpoint": { + "description": "RTSP endpoint of the stream that is being connected to.", + "$ref": "#/definitions/MediaGraphEndpoint" + } + }, + "required": [ + "endpoint" + ], + "allOf": [ + { + "$ref": "#/definitions/MediaGraphSource" + }, + {} + ], + "description": "Enables a graph to capture media from a RTSP server.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphRtspSource" + }, + "MediaGraphIoTHubMessageSource": { + "properties": { + "hubInputName": { + "type": "string", + "description": "Name of the input path where messages can be routed to (via routes declared in the IoT Edge deployment manifest)." + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphSource" + }, + {} + ], + "description": "Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphIoTHubMessageSource" + }, + "MediaGraphIoTHubMessageSink": { + "properties": { + "hubOutputName": { + "type": "string", + "description": "Name of the output path to which the graph will publish message. These messages can then be delivered to desired destinations by declaring routes referencing the output path in the IoT Edge deployment manifest." + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphSink" + }, + {} + ], + "description": "Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphIoTHubMessageSink" + }, + "MediaGraphEndpoint": { + "type": "object", + "required": [ + "@type", + "url" + ], + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The discriminator for derived types." + }, + "credentials": { + "description": "Polymorphic credentials to be presented to the endpoint.", + "$ref": "#/definitions/MediaGraphCredentials" + }, + "url": { + "type": "string", + "description": "Url for the endpoint." + } + }, + "description": "Base class for endpoints." + }, + "MediaGraphCredentials": { + "type": "object", + "required": [ + "@type" + ], + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The discriminator for derived types." + } + }, + "description": "Credentials to present during authentication." + }, + "MediaGraphUsernamePasswordCredentials": { + "properties": { + "username": { + "type": "string", + "description": "Username for a username/password pair." + }, + "password": { + "type": "string", + "description": "Password for a username/password pair." 
+ } + }, + "required": [ + "username" + ], + "allOf": [ + { + "$ref": "#/definitions/MediaGraphCredentials" + }, + {} + ], + "description": "Username/password credential pair.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphUsernamePasswordCredentials" + }, + "MediaGraphHttpHeaderCredentials": { + "properties": { + "headerName": { + "type": "string", + "description": "HTTP header name." + }, + "headerValue": { + "type": "string", + "description": "HTTP header value." + } + }, + "required": [ + "headerName", + "headerValue" + ], + "allOf": [ + { + "$ref": "#/definitions/MediaGraphCredentials" + }, + {} + ], + "description": "Http header service credentials.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphHttpHeaderCredentials" + }, + "MediaGraphUnsecuredEndpoint": { + "allOf": [ + { + "$ref": "#/definitions/MediaGraphEndpoint" + }, + {} + ], + "description": "An endpoint that the media graph can connect to, with no encryption in transit.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphUnsecuredEndpoint" + }, + "MediaGraphTlsEndpoint": { + "properties": { + "trustedCertificates": { + "description": "Trusted certificates when authenticating a TLS connection. Null designates that Azure Media Service's source of trust should be used.", + "$ref": "#/definitions/MediaGraphCertificateSource" + }, + "validationOptions": { + "description": "Validation options to use when authenticating a TLS connection. By default, strict validation is used.", + "$ref": "#/definitions/MediaGraphTlsValidationOptions" + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphEndpoint" + }, + {} + ], + "description": "An endpoint that the graph can connect to, which must be connected over TLS/SSL.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphTlsEndpoint" + }, + "MediaGraphCertificateSource": { + "type": "object", + "required": [ + "@type" + ], + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The discriminator for derived types." + } + }, + "description": "Base class for certificate sources." + }, + "MediaGraphTlsValidationOptions": { + "type": "object", + "properties": { + "ignoreHostname": { + "type": "string", + "description": "Boolean value ignoring the host name (common name) during validation." + }, + "ignoreSignature": { + "type": "string", + "description": "Boolean value ignoring the integrity of the certificate chain at the current time." + } + }, + "description": "Options for controlling the authentication of TLS endpoints." + }, + "MediaGraphPemCertificateList": { + "properties": { + "certificates": { + "type": "array", + "description": "PEM formatted public certificates one per entry.", + "items": { + "type": "string" + } + } + }, + "required": [ + "certificates" + ], + "allOf": [ + { + "$ref": "#/definitions/MediaGraphCertificateSource" + }, + {} + ], + "description": "A list of PEM formatted certificates.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphPemCertificateList" + }, + "MediaGraphSink": { + "type": "object", + "required": [ + "@type", + "inputs", + "name" + ], + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The discriminator for derived types." + }, + "name": { + "type": "string", + "description": "Name to be used for the media graph sink." 
+ }, + "inputs": { + "type": "array", + "description": "An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node.", + "items": { + "$ref": "#/definitions/MediaGraphNodeInput" + } + } + }, + "description": "Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module." + }, + "MediaGraphNodeInput": { + "type": "object", + "properties": { + "nodeName": { + "type": "string", + "description": "The name of another node in the media graph, the output of which is used as input to this node." + }, + "outputSelectors": { + "type": "array", + "description": "Allows for the selection of particular streams from another node.", + "items": { + "$ref": "#/definitions/MediaGraphOutputSelector" + } + } + }, + "description": "Represents the input to any node in a media graph." + }, + "MediaGraphOutputSelector": { + "properties": { + "property": { + "type": "string", + "description": "The stream property to compare with.", + "enum": [ + "mediaType" + ], + "x-ms-enum": { + "name": "MediaGraphOutputSelectorProperty", + "values": [ + { + "value": "mediaType", + "description": "The stream's MIME type or subtype." + } + ], + "modelAsString": false + } + }, + "operator": { + "type": "string", + "description": "The operator to compare streams by.", + "enum": [ + "is", + "isNot" + ], + "x-ms-enum": { + "name": "MediaGraphOutputSelectorOperator", + "values": [ + { + "value": "is", + "description": "A media type is the same type or a subtype." + }, + { + "value": "isNot", + "description": "A media type is not the same type or a subtype." + } + ], + "modelAsString": false + } + }, + "value": { + "type": "string", + "description": "Value to compare against." + } + }, + "description": "Allows for the selection of particular streams from another node." + }, + "MediaGraphFileSink": { + "properties": { + "filePathPattern": { + "type": "string", + "description": "Absolute file path pattern for creating new files on the Edge device.", + "minLength": 1 + } + }, + "required": [ + "filePathPattern" + ], + "allOf": [ + { + "$ref": "#/definitions/MediaGraphSink" + }, + {} + ], + "description": "Enables a media graph to write/store media (video and audio) to a file on the Edge device.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphFileSink" + }, + "MediaGraphAssetSink": { + "properties": { + "assetNamePattern": { + "type": "string", + "description": "A name pattern when creating new assets." + }, + "segmentLength": { + "type": "string", + "format": "duration", + "example": "PT30S", + "description": "When writing media to an asset, wait until at least this duration of media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum of 30 seconds and a recommended maximum of 5 minutes." + }, + "localMediaCachePath": { + "type": "string", + "description": "Path to a local file system directory for temporary caching of media, before writing to an Asset. Used when the Edge device is temporarily disconnected from Azure." + }, + "localMediaCacheMaximumSizeMiB": { + "type": "string", + "description": "Maximum amount of disk space that can be used for temporary caching of media." 
+ } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphSink" + }, + {} + ], + "description": "Enables a graph to record media to an Azure Media Services asset, for subsequent playback.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphAssetSink" + }, + "MediaGraphProcessor": { + "type": "object", + "required": [ + "@type", + "inputs", + "name" + ], + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The discriminator for derived types." + }, + "name": { + "type": "string", + "description": "The name for this processor node." + }, + "inputs": { + "type": "array", + "description": "An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node.", + "items": { + "$ref": "#/definitions/MediaGraphNodeInput" + } + } + }, + "description": "A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output." + }, + "MediaGraphMotionDetectionProcessor": { + "properties": { + "sensitivity": { + "type": "string", + "description": "Enumeration that specifies the sensitivity of the motion detection processor.", + "enum": [ + "Low", + "Medium", + "High" + ], + "x-ms-enum": { + "name": "MediaGraphMotionDetectionSensitivity", + "values": [ + { + "value": "Low", + "description": "Low Sensitivity." + }, + { + "value": "Medium", + "description": "Medium Sensitivity." + }, + { + "value": "High", + "description": "High Sensitivity." + } + ], + "modelAsString": true + } + }, + "outputMotionRegion": { + "type": "boolean", + "description": "Indicates whether the processor should detect and output the regions, within the video frame, where motion was detected. Default is true." + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphProcessor" + }, + {} + ], + "description": "A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphMotionDetectionProcessor" + }, + "MediaGraphExtensionProcessorBase": { + "properties": { + "endpoint": { + "description": "Endpoint to which this processor should connect.", + "$ref": "#/definitions/MediaGraphEndpoint" + }, + "image": { + "description": "Describes the parameters of the image that is sent as input to the endpoint.", + "$ref": "#/definitions/MediaGraphImage" + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphProcessor" + }, + {} + ], + "description": "Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphExtensionProcessorBase" + }, + "MediaGraphCognitiveServicesVisionExtension": { + "properties": {}, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphExtensionProcessorBase" + } + ], + "description": "A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. 
Inference results are relayed to downstream nodes.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension" + }, + "MediaGraphGrpcExtension": { + "required": [ + "dataTransfer" + ], + "properties": { + "dataTransfer": { + "description": "How media should be transferred to the inferencing engine.", + "$ref": "#/definitions/MediaGraphGrpcExtensionDataTransfer" + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphExtensionProcessorBase" + }, + {} + ], + "description": "A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphGrpcExtension" + }, + "MediaGraphGrpcExtensionDataTransfer": { + "required": [ + "mode" + ], + "properties": { + "sharedMemorySizeMiB": { + "type": "string", + "description": "The size of the buffer for all in-flight frames in mebibytes if mode is SharedMemory. Should not be specificed otherwise." + }, + "mode": { + "type": "string", + "description": "How frame data should be transmitted to the inferencing engine.", + "enum": [ + "Embedded", + "SharedMemory" + ], + "x-ms-enum": { + "name": "MediaGraphGrpcExtensionDataTransferMode", + "values": [ + { + "value": "Embedded", + "description": "Frames are transferred embedded into the gRPC messages." + }, + { + "value": "SharedMemory", + "description": "Frames are transferred through shared memory." + } + ], + "modelAsString": true + } + } + }, + "description": "Describes how media should be transferred to the inferencing engine.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphGrpcExtension" + }, + "MediaGraphHttpExtension": { + "allOf": [ + { + "$ref": "#/definitions/MediaGraphExtensionProcessorBase" + }, + {} + ], + "description": "A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphHttpExtension" + }, + "MediaGraphImage": { + "type": "object", + "properties": { + "scale": { + "$ref": "#/definitions/MediaGraphImageScale" + }, + "format": { + "$ref": "#/definitions/MediaGraphImageFormat" + } + }, + "description": "Describes the properties of an image frame." + }, + "MediaGraphImageScale": { + "type": "object", + "properties": { + "mode": { + "type": "string", + "description": "Describes the modes for scaling an input video frame into an image, before it is sent to an inference engine.", + "enum": [ + "PreserveAspectRatio", + "Pad", + "Stretch" + ], + "x-ms-enum": { + "name": "MediaGraphImageScaleMode", + "values": [ + { + "value": "PreserveAspectRatio", + "description": "Use the same aspect ratio as the input frame." + }, + { + "value": "Pad", + "description": "Center pad the input frame to match the given dimensions." + }, + { + "value": "Stretch", + "description": "Stretch input frame to match given dimensions." + } + ], + "modelAsString": true + } + }, + "width": { + "type": "string", + "description": "The desired output width of the image." + }, + "height": { + "type": "string", + "description": "The desired output height of the image." + } + }, + "description": "The scaling mode for the image." 
+ }, + "MediaGraphImageFormat": { + "required": [ + "@type" + ], + "type": "object", + "discriminator": "@type", + "properties": { + "@type": { + "type": "string", + "description": "The discriminator for derived types." + } + }, + "description": "Encoding settings for an image.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormat" + }, + "MediaGraphImageFormatRaw": { + "properties": { + "pixelFormat": { + "type": "string", + "description": "pixel format", + "enum": [ + "Yuv420p", + "Rgb565be", + "Rgb565le", + "Rgb555be", + "Rgb555le", + "Rgb24", + "Bgr24", + "Argb", + "Rgba", + "Abgr", + "Bgra" + ], + "x-ms-enum": { + "name": "MediaGraphImageFormatRawPixelFormat", + "values": [ + { + "value": "Yuv420p", + "description": "Planar YUV 4:2:0, 12bpp, (1 Cr and Cb sample per 2x2 Y samples)." + }, + { + "value": "Rgb565be", + "description": "Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian." + }, + { + "value": "Rgb565le", + "description": "Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian." + }, + { + "value": "Rgb555be", + "description": "Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined." + }, + { + "value": "Rgb555le", + "description": "Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined." + }, + { + "value": "Rgb24", + "description": "Packed RGB 8:8:8, 24bpp, RGBRGB." + }, + { + "value": "Bgr24", + "description": "Packed RGB 8:8:8, 24bpp, BGRBGR." + }, + { + "value": "Argb", + "description": "Packed ARGB 8:8:8:8, 32bpp, ARGBARGB." + }, + { + "value": "Rgba", + "description": "Packed RGBA 8:8:8:8, 32bpp, RGBARGBA." + }, + { + "value": "Abgr", + "description": "Packed ABGR 8:8:8:8, 32bpp, ABGRABGR." + }, + { + "value": "Bgra", + "description": "Packed BGRA 8:8:8:8, 32bpp, BGRABGRA." + } + ], + "modelAsString": true + } + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphImageFormat" + }, + {} + ], + "description": "Encoding settings for raw images.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormatRaw" + }, + "MediaGraphImageFormatEncoded": { + "properties": { + "encoding": { + "type": "string", + "description": "The different encoding formats that can be used for the image.", + "default": "Jpeg", + "enum": [ + "Jpeg", + "Bmp", + "Png" + ], + "x-ms-enum": { + "name": "MediaGraphImageEncodingFormat", + "values": [ + { + "value": "Jpeg", + "description": "JPEG image format." + }, + { + "value": "Bmp", + "description": "BMP image format." + }, + { + "value": "Png", + "description": "PNG image format." + } + ], + "modelAsString": true + } + }, + "quality": { + "type": "string", + "description": "The image quality (used for JPEG only). Value must be between 0 to 100 (best quality)." + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphImageFormat" + }, + {} + ], + "description": "Allowed formats for the image.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormatEncoded" + }, + "MediaGraphSignalGateProcessor": { + "properties": { + "activationEvaluationWindow": { + "type": "string", + "example": "PT1.0S", + "description": "The period of time over which the gate gathers input events, before evaluating them." + }, + "activationSignalOffset": { + "type": "string", + "example": "-PT1.0S", + "description": "Signal offset once the gate is activated (can be negative). It is an offset between the time the event is received, and the timestamp of the first media sample (eg. video frame) that is allowed through by the gate." 
+ }, + "minimumActivationTime": { + "type": "string", + "example": "PT1S", + "description": "The minimum period for which the gate remains open, in the absence of subsequent triggers (events)." + }, + "maximumActivationTime": { + "type": "string", + "example": "PT2S", + "description": "The maximum period for which the gate remains open, in the presence of subsequent events." + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphProcessor" + }, + {} + ], + "description": "A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphSignalGateProcessor" + }, + "MediaGraphFrameRateFilterProcessor": { + "properties": { + "maximumFps": { + "type": "string", + "description": "Ensures that the frame rate of the video leaving this processor does not exceed this limit." + } + }, + "allOf": [ + { + "$ref": "#/definitions/MediaGraphProcessor" + }, + {} + ], + "description": "Limits the frame rate on the input video stream based on the maximumFps property.", + "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphFrameRateFilterProcessor" + } + } +} diff --git a/sdk/media/azure-media-lva-edge/swagger/commandOutput.txt b/sdk/media/azure-media-lva-edge/swagger/commandOutput.txt new file mode 100644 index 000000000000..0290e6671f32 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/swagger/commandOutput.txt @@ -0,0 +1,158 @@ +AutoRest code generation utility [cli version: 3.0.6247; node: v12.16.1, max-memory: 2048 gb] +(C) 2018 Microsoft Corporation. +https://aka.ms/autorest +NOTE: AutoRest core version selected from configuration: 3.0.6302. + Loading AutoRest core 'C:\Users\hivyas\.autorest\@autorest_core@3.0.6302\node_modules\@autorest\core\dist' (3.0.6302) + Loading AutoRest extension '@autorest/python' (5.1.0-preview.7->5.1.0-preview.7) + Loading AutoRest extension '@autorest/modelerfour' (4.15.400->4.15.400) + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphTopologyListRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphTopologyGetRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphTopologyDeleteRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceListRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceGetRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceActivateRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceDeActivateRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceDeleteRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. 
+ +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphUnsecuredEndpoint' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphCognitiveServicesVisionExtension' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. + +WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphHttpExtension' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphInstanceCollection' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphTopologyCollection' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphRtspSource' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphIoTHubMessageSource' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphIoTHubMessageSink' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphUsernamePasswordCredentials' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphHttpHeaderCredentials' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphUnsecuredEndpoint' with an undefined type and 'allOf'/'anyOf'/'oneOf' is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphTlsEndpoint' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphPemCertificateList' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphOutputSelector' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphFileSink' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphAssetSink' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphMotionDetectionProcessor' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphExtensionProcessorBase' with an undefined type and decalared properties is a bit ambigious. 
This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphCognitiveServicesVisionExtension' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphGrpcExtension' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphGrpcExtensionDataTransfer' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphHttpExtension' with an undefined type and 'allOf'/'anyOf'/'oneOf' is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphImageFormatRaw' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphImageFormatEncoded' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphSignalGateProcessor' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphFrameRateFilterProcessor' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphRtspSource' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphIoTHubMessageSource' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphIoTHubMessageSink' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphUsernamePasswordCredentials' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphHttpHeaderCredentials' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphTlsEndpoint' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphPemCertificateList' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphFileSink' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphAssetSink' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphMotionDetectionProcessor' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphExtensionProcessorBase' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphGrpcExtension' has an allOf list with an empty object schema as a parent, removing it. 
+ +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphImageFormatRaw' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphImageFormatEncoded' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphSignalGateProcessor' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphFrameRateFilterProcessor' has an allOf list with an empty object schema as a parent, removing it. + +WARNING (PreCheck/CheckDuplicateSchemas): Checking for duplicate schemas, this could take a (long) while. Run with --verbose for more detail. + +WARNING (Modeler/MissingType): The schema 'components·109p5kc·schemas·mediagraphrtspsource·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·109p5kc·schemas·mediagraphrtspsource·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·1af9g39·schemas·mediagraphiothubmessagesource·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1af9g39·schemas·mediagraphiothubmessagesource·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·1jngw4h·schemas·mediagraphiothubmessagesink·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1jngw4h·schemas·mediagraphiothubmessagesink·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·1mxkvbd·schemas·mediagraphusernamepasswordcredentials·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1mxkvbd·schemas·mediagraphusernamepasswordcredentials·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·1uqp1b7·schemas·mediagraphhttpheadercredentials·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1uqp1b7·schemas·mediagraphhttpheadercredentials·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·q7dsz6·schemas·mediagraphtlsendpoint·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·q7dsz6·schemas·mediagraphtlsendpoint·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·7b4k0z·schemas·mediagraphpemcertificatelist·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·7b4k0z·schemas·mediagraphpemcertificatelist·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·1nh92cj·schemas·mediagraphfilesink·allof·1' has no type or format information whatsoever. 
Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1nh92cj·schemas·mediagraphfilesink·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·z5bgs5·schemas·mediagraphassetsink·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·z5bgs5·schemas·mediagraphassetsink·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·1vu24mc·schemas·mediagraphmotiondetectionprocessor·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1vu24mc·schemas·mediagraphmotiondetectionprocessor·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·1axip85·schemas·mediagraphextensionprocessorbase·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1axip85·schemas·mediagraphextensionprocessorbase·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·1yl8gs2·schemas·mediagraphgrpcextension·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1yl8gs2·schemas·mediagraphgrpcextension·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·1k6pka5·schemas·mediagraphimageformatraw·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1k6pka5·schemas·mediagraphimageformatraw·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·nnu6mb·schemas·mediagraphimageformatencoded·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·nnu6mb·schemas·mediagraphimageformatencoded·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·dx5boa·schemas·mediagraphsignalgateprocessor·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·dx5boa·schemas·mediagraphsignalgateprocessor·allof·1 + +WARNING (Modeler/MissingType): The schema 'components·1hcm6ag·schemas·mediagraphframeratefilterprocessor·allof·1' has no type or format information whatsoever. Location: + file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1hcm6ag·schemas·mediagraphframeratefilterprocessor·allof·1 +Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? 
\ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/tests/_shared/asynctestcase.py b/sdk/media/azure-media-lva-edge/tests/_shared/asynctestcase.py new file mode 100644 index 000000000000..53b2dcb4ba92 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/tests/_shared/asynctestcase.py @@ -0,0 +1,79 @@ +import asyncio +import functools +import os + +from azure_devtools.scenario_tests.utilities import trim_kwargs_from_test_function +from devtools_testutils.azure_testcase import _is_autorest_v3 + +from .testcase import AppConfigTestCase + +class AsyncAppConfigTestCase(AppConfigTestCase): + def __init__(self, *args, **kwargs): + super(AppConfigTestCase, self).__init__(*args, **kwargs) + + class AsyncFakeCredential(object): + # fake async credential + async def get_token(self, *scopes, **kwargs): + return AccessToken('fake_token', 2527537086) + + async def close(self): + pass + + def create_basic_client(self, client_class, **kwargs): + # This is the patch for creating client using aio identity + + tenant_id = os.environ.get("AZURE_TENANT_ID", None) + client_id = os.environ.get("AZURE_CLIENT_ID", None) + secret = os.environ.get("AZURE_CLIENT_SECRET", None) + + if tenant_id and client_id and secret and self.is_live: + if _is_autorest_v3(client_class): + # Create azure-identity class using aio credential + from azure.identity.aio import ClientSecretCredential + credentials = ClientSecretCredential( + tenant_id=tenant_id, + client_id=client_id, + client_secret=secret + ) + else: + # Create msrestazure class + from msrestazure.azure_active_directory import ServicePrincipalCredentials + credentials = ServicePrincipalCredentials( + tenant=tenant_id, + client_id=client_id, + secret=secret + ) + else: + if _is_autorest_v3(client_class): + credentials = self.AsyncFakeCredential() + #credentials = self.settings.get_azure_core_credentials() + else: + credentials = self.settings.get_credentials() + + # Real client creation + # FIXME decide what is the final argument for that + # if self.is_playback(): + # kwargs.setdefault("polling_interval", 0) + if _is_autorest_v3(client_class): + kwargs.setdefault("logging_enable", True) + client = client_class( + credential=credentials, + **kwargs + ) + else: + client = client_class( + credentials=credentials, + **kwargs + ) + + if self.is_playback(): + try: + client._config.polling_interval = 0 # FIXME in azure-mgmt-core, make this a kwargs + except AttributeError: + pass + + if hasattr(client, "config"): # Autorest v2 + if self.is_playback(): + client.config.long_running_operation_timeout = 0 + client.config.enable_http_logger = True + return client diff --git a/sdk/media/azure-media-lva-edge/tests/_shared/testcase.py b/sdk/media/azure-media-lva-edge/tests/_shared/testcase.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/sdk/media/azure-media-lva-edge/tests/conftest.py b/sdk/media/azure-media-lva-edge/tests/conftest.py new file mode 100644 index 000000000000..c36aaed14908 --- /dev/null +++ b/sdk/media/azure-media-lva-edge/tests/conftest.py @@ -0,0 +1,25 @@ +# -------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +# -------------------------------------------------------------------------- diff --git a/sdk/media/azure-media-lva-edge/tests/test_app_config.py b/sdk/media/azure-media-lva-edge/tests/test_app_config.py new file mode 100644 index 000000000000..5871ed8eef2f --- /dev/null +++ b/sdk/media/azure-media-lva-edge/tests/test_app_config.py @@ -0,0 +1 @@ +import pytest diff --git a/sdk/media/ci.yml b/sdk/media/ci.yml index 58a0d6292800..2d63019f2b80 100644 --- a/sdk/media/ci.yml +++ b/sdk/media/ci.yml @@ -30,3 +30,6 @@ extends: Artifacts: - name: azure_mgmt_media safeName: azuremgmtmedia + - name: azure_media_lva_edge + safeName: azuremedialvaedge + From b90ff1379a6d726d3c92f2c7498cfaf0ca1216f7 Mon Sep 17 00:00:00 2001 From: hivyas Date: Thu, 10 Dec 2020 16:26:14 -0800 Subject: [PATCH 2/9] adding new pkg folder --- sdk/media/azure-media-lva-edge/CHANGELOG.md | 8 - sdk/media/azure-media-lva-edge/MANIFEST.in | 4 - sdk/media/azure-media-lva-edge/README.md | 38 - .../azure-media-lva-edge/azure/__init__.py | 7 - .../azure/media/lva/edge/__init__.py | 20 - .../media/lva/edge/_generated/_version.py | 9 - .../lva/edge/_generated/models/__init__.py | 199 -- ...r_live_video_analyticson_io_tedge_enums.py | 108 - .../lva/edge/_generated/models/_models.py | 2008 --------------- .../lva/edge/_generated/models/_models_py3.py | 2185 ----------------- .../azure/media/lva/edge/_generated/py.typed | 1 - .../azure/media/lva/edge/_version.py | 7 - .../azure-media-lva-edge/dev_requirements.txt | 11 - .../samples/sample_conditional_async.py | 48 - .../samples/sample_hello_world.py | 35 - .../samples/sample_lva.py | 83 - .../azure-media-lva-edge/sdk_packaging.toml | 4 - sdk/media/azure-media-lva-edge/setup.py | 102 - .../azure-media-lva-edge/swagger/README.md | 26 - .../swagger/appconfiguration.json | 1239 ---------- .../swagger/commandOutput.txt | 158 -- .../tests/_shared/asynctestcase.py | 79 - .../tests/_shared/testcase.py | 0 .../azure-media-lva-edge/tests/conftest.py | 25 - .../tests/test_app_config.py | 1 - sdk/media/azure-media-nspkg/CHANGELOG.md | 3 + sdk/media/azure-media-nspkg/MANIFEST.in | 3 + sdk/media/azure-media-nspkg/README.md | 12 + .../azure}/__init__.py | 2 +- .../azure-media-nspkg/azure/media/__init__.py | 1 + .../azure-media-nspkg/sdk_packaging.toml | 2 + .../setup.cfg | 0 sdk/media/azure-media-nspkg/setup.py | 39 + sdk/media/ci.yml | 2 - 34 files changed, 61 insertions(+), 6408 deletions(-) delete mode 100644 
sdk/media/azure-media-lva-edge/CHANGELOG.md delete mode 100644 sdk/media/azure-media-lva-edge/MANIFEST.in delete mode 100644 sdk/media/azure-media-lva-edge/README.md delete mode 100644 sdk/media/azure-media-lva-edge/azure/__init__.py delete mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py delete mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/_version.py delete mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/__init__.py delete mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py delete mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models.py delete mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models_py3.py delete mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/py.typed delete mode 100644 sdk/media/azure-media-lva-edge/azure/media/lva/edge/_version.py delete mode 100644 sdk/media/azure-media-lva-edge/dev_requirements.txt delete mode 100644 sdk/media/azure-media-lva-edge/samples/sample_conditional_async.py delete mode 100644 sdk/media/azure-media-lva-edge/samples/sample_hello_world.py delete mode 100644 sdk/media/azure-media-lva-edge/samples/sample_lva.py delete mode 100644 sdk/media/azure-media-lva-edge/sdk_packaging.toml delete mode 100644 sdk/media/azure-media-lva-edge/setup.py delete mode 100644 sdk/media/azure-media-lva-edge/swagger/README.md delete mode 100644 sdk/media/azure-media-lva-edge/swagger/appconfiguration.json delete mode 100644 sdk/media/azure-media-lva-edge/swagger/commandOutput.txt delete mode 100644 sdk/media/azure-media-lva-edge/tests/_shared/asynctestcase.py delete mode 100644 sdk/media/azure-media-lva-edge/tests/_shared/testcase.py delete mode 100644 sdk/media/azure-media-lva-edge/tests/conftest.py delete mode 100644 sdk/media/azure-media-lva-edge/tests/test_app_config.py create mode 100644 sdk/media/azure-media-nspkg/CHANGELOG.md create mode 100644 sdk/media/azure-media-nspkg/MANIFEST.in create mode 100644 sdk/media/azure-media-nspkg/README.md rename sdk/media/{azure-media-lva-edge/azure/media/lva/edge/_generated => azure-media-nspkg/azure}/__init__.py (80%) create mode 100644 sdk/media/azure-media-nspkg/azure/media/__init__.py create mode 100644 sdk/media/azure-media-nspkg/sdk_packaging.toml rename sdk/media/{azure-media-lva-edge => azure-media-nspkg}/setup.cfg (100%) create mode 100644 sdk/media/azure-media-nspkg/setup.py diff --git a/sdk/media/azure-media-lva-edge/CHANGELOG.md b/sdk/media/azure-media-lva-edge/CHANGELOG.md deleted file mode 100644 index 816f21db092e..000000000000 --- a/sdk/media/azure-media-lva-edge/CHANGELOG.md +++ /dev/null @@ -1,8 +0,0 @@ - -# Release History - -------------------- - -## 0.0.1 (Unreleased) - -- Training day! 
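The diffstat of this patch replaces the per-package `azure/__init__.py` with a shared `azure-media-nspkg` distribution, so that `azure` and `azure.media` act as pkgutil-style namespace packages that several `azure-media-*` wheels can extend. A minimal sketch of what such a namespace `__init__.py` contains is shown below; it mirrors the one-liner visible in the deleted `azure/__init__.py` hunk further down, and is illustrative rather than the literal nspkg file contents.

```python
# Pkgutil-style namespace package __init__.py (e.g. azure/__init__.py and
# azure/media/__init__.py in azure-media-nspkg). The line below merges this
# package's __path__ with that of any other installed distribution shipping
# the same package, so azure-media-lva-edge and other azure-media-* packages
# can coexist under a single azure.media namespace.
__path__ = __import__("pkgutil").extend_path(__path__, __name__)
```

Because the file only extends the import path and defines nothing else, it can be shared between packages, which is why the diffstat records it as a rename with high similarity rather than new content.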
diff --git a/sdk/media/azure-media-lva-edge/MANIFEST.in b/sdk/media/azure-media-lva-edge/MANIFEST.in deleted file mode 100644 index 7ebdd947f8ff..000000000000 --- a/sdk/media/azure-media-lva-edge/MANIFEST.in +++ /dev/null @@ -1,4 +0,0 @@ -recursive-include tests *.py -include *.md -include azure/__init__.py -recursive-include samples *.py *.md diff --git a/sdk/media/azure-media-lva-edge/README.md b/sdk/media/azure-media-lva-edge/README.md deleted file mode 100644 index c5012d4038c9..000000000000 --- a/sdk/media/azure-media-lva-edge/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# Azure App Configuration client library for Python SDK Training - -Azure App Configuration is a managed service that helps developers centralize their application configurations simply and securely. - -Modern programs, especially programs running in a cloud, generally have many components that are distributed in nature. Spreading configuration settings across these components can lead to hard-to-troubleshoot errors during an application deployment. Use App Configuration to securely store all the settings for your application in one place. - -Use the client library for App Configuration to create and manage application configuration settings. - -## Prerequisites - -* Python 2.7, or 3.5 or later is required to use this package. -* You need an [Azure subscription][azure_sub], and a [Configuration Store][configuration_store] to use this package. - -To create a Configuration Store, you can use the Azure Portal or [Azure CLI][azure_cli]. - -After that, create the Configuration Store: - -```Powershell -az appconfig create --name --resource-group --location eastus -``` - - -## Contributing - -This project welcomes contributions and suggestions. Most contributions require -you to agree to a Contributor License Agreement (CLA) declaring that you have -the right to, and actually do, grant us the rights to use your contribution. -For details, visit https://cla.microsoft.com. - -When you submit a pull request, a CLA-bot will automatically determine whether -you need to provide a CLA and decorate the PR appropriately (e.g., label, -comment). Simply follow the instructions provided by the bot. You will only -need to do this once across all repos using our CLA. - -This project has adopted the -[Microsoft Open Source Code of Conduct][code_of_conduct]. For more information, -see the Code of Conduct FAQ or contact opencode@microsoft.com with any -additional questions or comments. diff --git a/sdk/media/azure-media-lva-edge/azure/__init__.py b/sdk/media/azure-media-lva-edge/azure/__init__.py deleted file mode 100644 index 0e40e134bdac..000000000000 --- a/sdk/media/azure-media-lva-edge/azure/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. 
-# -------------------------------------------------------------------------- - -__path__ = __import__("pkgutil").extend_path(__path__, __name__) \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py deleted file mode 100644 index 725cd6860541..000000000000 --- a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore -from azure.media.lva.edge._generated.models import MediaGraphTopologySetRequestBody, MediaGraphTopologySetRequest, MediaGraphInstanceSetRequest, MediaGraphInstanceSetRequestBody - -def _OverrideTopologySetRequestSerialize(self): - graph_body = MediaGraphTopologySetRequestBody(name=self.graph.name) - graph_body.system_data = self.graph.system_data - graph_body.properties = self.graph.properties - - return graph_body.serialize() - -MediaGraphTopologySetRequest.serialize = _OverrideTopologySetRequestSerialize - -def _OverrideInstanceSetRequestSerialize(self): - graph_body = MediaGraphInstanceSetRequestBody(name=self.instance.name) - graph_body.system_data = self.instance.system_data - graph_body.properties = self.instance.properties - - return graph_body.serialize() - -MediaGraphInstanceSetRequest.serialize = _OverrideInstanceSetRequestSerialize \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/_version.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/_version.py deleted file mode 100644 index 31ed98425268..000000000000 --- a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/_version.py +++ /dev/null @@ -1,9 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -VERSION = "1.0" diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/__init__.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/__init__.py deleted file mode 100644 index 2e389ab8ef9d..000000000000 --- a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/__init__.py +++ /dev/null @@ -1,199 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- - -try: - from ._models_py3 import ItemNonSetRequestBase - from ._models_py3 import MediaGraphAssetSink - from ._models_py3 import MediaGraphCertificateSource - from ._models_py3 import MediaGraphCognitiveServicesVisionExtension - from ._models_py3 import MediaGraphCredentials - from ._models_py3 import MediaGraphEndpoint - from ._models_py3 import MediaGraphExtensionProcessorBase - from ._models_py3 import MediaGraphFileSink - from ._models_py3 import MediaGraphFrameRateFilterProcessor - from ._models_py3 import MediaGraphGrpcExtension - from ._models_py3 import MediaGraphGrpcExtensionDataTransfer - from ._models_py3 import MediaGraphHttpExtension - from ._models_py3 import MediaGraphHttpHeaderCredentials - from ._models_py3 import MediaGraphImage - from ._models_py3 import MediaGraphImageFormat - from ._models_py3 import MediaGraphImageFormatEncoded - from ._models_py3 import MediaGraphImageFormatRaw - from ._models_py3 import MediaGraphImageScale - from ._models_py3 import MediaGraphInstance - from ._models_py3 import MediaGraphInstanceActivateRequest - from ._models_py3 import MediaGraphInstanceCollection - from ._models_py3 import MediaGraphInstanceDeActivateRequest - from ._models_py3 import MediaGraphInstanceDeleteRequest - from ._models_py3 import MediaGraphInstanceGetRequest - from ._models_py3 import MediaGraphInstanceListRequest - from ._models_py3 import MediaGraphInstanceProperties - from ._models_py3 import MediaGraphInstanceSetRequest - from ._models_py3 import MediaGraphInstanceSetRequestBody - from ._models_py3 import MediaGraphIoTHubMessageSink - from ._models_py3 import MediaGraphIoTHubMessageSource - from ._models_py3 import MediaGraphMotionDetectionProcessor - from ._models_py3 import MediaGraphNodeInput - from ._models_py3 import MediaGraphOutputSelector - from ._models_py3 import MediaGraphParameterDeclaration - from ._models_py3 import MediaGraphParameterDefinition - from ._models_py3 import MediaGraphPemCertificateList - from ._models_py3 import MediaGraphProcessor - from ._models_py3 import MediaGraphRtspSource - from ._models_py3 import MediaGraphSignalGateProcessor - from ._models_py3 import MediaGraphSink - from ._models_py3 import MediaGraphSource - from ._models_py3 import MediaGraphSystemData - from ._models_py3 import MediaGraphTlsEndpoint - from ._models_py3 import MediaGraphTlsValidationOptions - from ._models_py3 import MediaGraphTopology - from ._models_py3 import MediaGraphTopologyCollection - from ._models_py3 import MediaGraphTopologyDeleteRequest - from ._models_py3 import MediaGraphTopologyGetRequest - from ._models_py3 import MediaGraphTopologyListRequest - from ._models_py3 import MediaGraphTopologyProperties - from ._models_py3 import MediaGraphTopologySetRequest - from ._models_py3 import MediaGraphTopologySetRequestBody - from ._models_py3 import MediaGraphUnsecuredEndpoint - from ._models_py3 import MediaGraphUsernamePasswordCredentials - from ._models_py3 import OperationBase -except (SyntaxError, ImportError): - from ._models import ItemNonSetRequestBase # type: ignore - from ._models import MediaGraphAssetSink # type: ignore - from ._models import MediaGraphCertificateSource # type: ignore - from ._models import MediaGraphCognitiveServicesVisionExtension # type: ignore - from ._models import MediaGraphCredentials # type: ignore - from ._models import MediaGraphEndpoint # type: ignore - from ._models import MediaGraphExtensionProcessorBase # type: ignore - from 
._models import MediaGraphFileSink # type: ignore - from ._models import MediaGraphFrameRateFilterProcessor # type: ignore - from ._models import MediaGraphGrpcExtension # type: ignore - from ._models import MediaGraphGrpcExtensionDataTransfer # type: ignore - from ._models import MediaGraphHttpExtension # type: ignore - from ._models import MediaGraphHttpHeaderCredentials # type: ignore - from ._models import MediaGraphImage # type: ignore - from ._models import MediaGraphImageFormat # type: ignore - from ._models import MediaGraphImageFormatEncoded # type: ignore - from ._models import MediaGraphImageFormatRaw # type: ignore - from ._models import MediaGraphImageScale # type: ignore - from ._models import MediaGraphInstance # type: ignore - from ._models import MediaGraphInstanceActivateRequest # type: ignore - from ._models import MediaGraphInstanceCollection # type: ignore - from ._models import MediaGraphInstanceDeActivateRequest # type: ignore - from ._models import MediaGraphInstanceDeleteRequest # type: ignore - from ._models import MediaGraphInstanceGetRequest # type: ignore - from ._models import MediaGraphInstanceListRequest # type: ignore - from ._models import MediaGraphInstanceProperties # type: ignore - from ._models import MediaGraphInstanceSetRequest # type: ignore - from ._models import MediaGraphInstanceSetRequestBody # type: ignore - from ._models import MediaGraphIoTHubMessageSink # type: ignore - from ._models import MediaGraphIoTHubMessageSource # type: ignore - from ._models import MediaGraphMotionDetectionProcessor # type: ignore - from ._models import MediaGraphNodeInput # type: ignore - from ._models import MediaGraphOutputSelector # type: ignore - from ._models import MediaGraphParameterDeclaration # type: ignore - from ._models import MediaGraphParameterDefinition # type: ignore - from ._models import MediaGraphPemCertificateList # type: ignore - from ._models import MediaGraphProcessor # type: ignore - from ._models import MediaGraphRtspSource # type: ignore - from ._models import MediaGraphSignalGateProcessor # type: ignore - from ._models import MediaGraphSink # type: ignore - from ._models import MediaGraphSource # type: ignore - from ._models import MediaGraphSystemData # type: ignore - from ._models import MediaGraphTlsEndpoint # type: ignore - from ._models import MediaGraphTlsValidationOptions # type: ignore - from ._models import MediaGraphTopology # type: ignore - from ._models import MediaGraphTopologyCollection # type: ignore - from ._models import MediaGraphTopologyDeleteRequest # type: ignore - from ._models import MediaGraphTopologyGetRequest # type: ignore - from ._models import MediaGraphTopologyListRequest # type: ignore - from ._models import MediaGraphTopologyProperties # type: ignore - from ._models import MediaGraphTopologySetRequest # type: ignore - from ._models import MediaGraphTopologySetRequestBody # type: ignore - from ._models import MediaGraphUnsecuredEndpoint # type: ignore - from ._models import MediaGraphUsernamePasswordCredentials # type: ignore - from ._models import OperationBase # type: ignore - -from ._definitionsfor_live_video_analyticson_io_tedge_enums import ( - MediaGraphGrpcExtensionDataTransferMode, - MediaGraphImageEncodingFormat, - MediaGraphImageFormatRawPixelFormat, - MediaGraphImageScaleMode, - MediaGraphInstanceState, - MediaGraphMotionDetectionSensitivity, - MediaGraphOutputSelectorOperator, - MediaGraphParameterType, - MediaGraphRtspTransport, -) - -__all__ = [ - 'ItemNonSetRequestBase', - 
'MediaGraphAssetSink', - 'MediaGraphCertificateSource', - 'MediaGraphCognitiveServicesVisionExtension', - 'MediaGraphCredentials', - 'MediaGraphEndpoint', - 'MediaGraphExtensionProcessorBase', - 'MediaGraphFileSink', - 'MediaGraphFrameRateFilterProcessor', - 'MediaGraphGrpcExtension', - 'MediaGraphGrpcExtensionDataTransfer', - 'MediaGraphHttpExtension', - 'MediaGraphHttpHeaderCredentials', - 'MediaGraphImage', - 'MediaGraphImageFormat', - 'MediaGraphImageFormatEncoded', - 'MediaGraphImageFormatRaw', - 'MediaGraphImageScale', - 'MediaGraphInstance', - 'MediaGraphInstanceActivateRequest', - 'MediaGraphInstanceCollection', - 'MediaGraphInstanceDeActivateRequest', - 'MediaGraphInstanceDeleteRequest', - 'MediaGraphInstanceGetRequest', - 'MediaGraphInstanceListRequest', - 'MediaGraphInstanceProperties', - 'MediaGraphInstanceSetRequest', - 'MediaGraphInstanceSetRequestBody', - 'MediaGraphIoTHubMessageSink', - 'MediaGraphIoTHubMessageSource', - 'MediaGraphMotionDetectionProcessor', - 'MediaGraphNodeInput', - 'MediaGraphOutputSelector', - 'MediaGraphParameterDeclaration', - 'MediaGraphParameterDefinition', - 'MediaGraphPemCertificateList', - 'MediaGraphProcessor', - 'MediaGraphRtspSource', - 'MediaGraphSignalGateProcessor', - 'MediaGraphSink', - 'MediaGraphSource', - 'MediaGraphSystemData', - 'MediaGraphTlsEndpoint', - 'MediaGraphTlsValidationOptions', - 'MediaGraphTopology', - 'MediaGraphTopologyCollection', - 'MediaGraphTopologyDeleteRequest', - 'MediaGraphTopologyGetRequest', - 'MediaGraphTopologyListRequest', - 'MediaGraphTopologyProperties', - 'MediaGraphTopologySetRequest', - 'MediaGraphTopologySetRequestBody', - 'MediaGraphUnsecuredEndpoint', - 'MediaGraphUsernamePasswordCredentials', - 'OperationBase', - 'MediaGraphGrpcExtensionDataTransferMode', - 'MediaGraphImageEncodingFormat', - 'MediaGraphImageFormatRawPixelFormat', - 'MediaGraphImageScaleMode', - 'MediaGraphInstanceState', - 'MediaGraphMotionDetectionSensitivity', - 'MediaGraphOutputSelectorOperator', - 'MediaGraphParameterType', - 'MediaGraphRtspTransport', -] diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py deleted file mode 100644 index 6e78e4728244..000000000000 --- a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_definitionsfor_live_video_analyticson_io_tedge_enums.py +++ /dev/null @@ -1,108 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum, EnumMeta -from six import with_metaclass - -class _CaseInsensitiveEnumMeta(EnumMeta): - def __getitem__(self, name): - return super().__getitem__(name.upper()) - - def __getattr__(cls, name): - """Return the enum member matching `name` - We use __getattr__ instead of descriptors or inserting into the enum - class' __dict__ in order to support `name` and `value` being both - properties for enum members (which live in the class' __dict__) and - enum members themselves. 
- """ - try: - return cls._member_map_[name.upper()] - except KeyError: - raise AttributeError(name) - - -class MediaGraphGrpcExtensionDataTransferMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """How frame data should be transmitted to the inferencing engine. - """ - - EMBEDDED = "Embedded" #: Frames are transferred embedded into the gRPC messages. - SHARED_MEMORY = "SharedMemory" #: Frames are transferred through shared memory. - -class MediaGraphImageEncodingFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The different encoding formats that can be used for the image. - """ - - JPEG = "Jpeg" #: JPEG image format. - BMP = "Bmp" #: BMP image format. - PNG = "Png" #: PNG image format. - -class MediaGraphImageFormatRawPixelFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """pixel format - """ - - YUV420_P = "Yuv420p" #: Planar YUV 4:2:0, 12bpp, (1 Cr and Cb sample per 2x2 Y samples). - RGB565_BE = "Rgb565be" #: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian. - RGB565_LE = "Rgb565le" #: Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian. - RGB555_BE = "Rgb555be" #: Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined. - RGB555_LE = "Rgb555le" #: Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined. - RGB24 = "Rgb24" #: Packed RGB 8:8:8, 24bpp, RGBRGB. - BGR24 = "Bgr24" #: Packed RGB 8:8:8, 24bpp, BGRBGR. - ARGB = "Argb" #: Packed ARGB 8:8:8:8, 32bpp, ARGBARGB. - RGBA = "Rgba" #: Packed RGBA 8:8:8:8, 32bpp, RGBARGBA. - ABGR = "Abgr" #: Packed ABGR 8:8:8:8, 32bpp, ABGRABGR. - BGRA = "Bgra" #: Packed BGRA 8:8:8:8, 32bpp, BGRABGRA. - -class MediaGraphImageScaleMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Describes the modes for scaling an input video frame into an image, before it is sent to an - inference engine. - """ - - PRESERVE_ASPECT_RATIO = "PreserveAspectRatio" #: Use the same aspect ratio as the input frame. - PAD = "Pad" #: Center pad the input frame to match the given dimensions. - STRETCH = "Stretch" #: Stretch input frame to match given dimensions. - -class MediaGraphInstanceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Allowed states for a graph Instance. - """ - - INACTIVE = "Inactive" #: Inactive state. - ACTIVATING = "Activating" #: Activating state. - ACTIVE = "Active" #: Active state. - DEACTIVATING = "Deactivating" #: Deactivating state. - -class MediaGraphMotionDetectionSensitivity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Enumeration that specifies the sensitivity of the motion detection processor. - """ - - LOW = "Low" #: Low Sensitivity. - MEDIUM = "Medium" #: Medium Sensitivity. - HIGH = "High" #: High Sensitivity. - -class MediaGraphOutputSelectorOperator(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """The operator to compare streams by. - """ - - IS_ENUM = "is" #: A media type is the same type or a subtype. - IS_NOT = "isNot" #: A media type is not the same type or a subtype. - -class MediaGraphParameterType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """name - """ - - STRING = "String" #: A string parameter value. - SECRET_STRING = "SecretString" #: A string to hold sensitive information as parameter value. - INT = "Int" #: A 32-bit signed integer as parameter value. - DOUBLE = "Double" #: A 64-bit double-precision floating point type as parameter value. - BOOL = "Bool" #: A boolean value that is either true or false. 
- -class MediaGraphRtspTransport(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - """Underlying RTSP transport. This is used to enable or disable HTTP tunneling. - """ - - HTTP = "Http" #: HTTP/HTTPS transport. This should be used when HTTP tunneling is desired. - TCP = "Tcp" #: TCP transport. This should be used when HTTP tunneling is NOT desired. diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models.py deleted file mode 100644 index 62f58c7ea385..000000000000 --- a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models.py +++ /dev/null @@ -1,2008 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import msrest.serialization - - -class OperationBase(msrest.serialization.Model): - """OperationBase. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphInstanceListRequest, MediaGraphInstanceSetRequest, MediaGraphTopologyListRequest, MediaGraphTopologySetRequest, ItemNonSetRequestBase, MediaGraphInstanceSetRequestBody, MediaGraphTopologySetRequestBody. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - } - - _subtype_map = { - 'method_name': {'GraphInstanceList': 'MediaGraphInstanceListRequest', 'GraphInstanceSet': 'MediaGraphInstanceSetRequest', 'GraphTopologyList': 'MediaGraphTopologyListRequest', 'GraphTopologySet': 'MediaGraphTopologySetRequest', 'ItemNonSetRequestBase': 'ItemNonSetRequestBase', 'MediaGraphInstanceSetRequestBody': 'MediaGraphInstanceSetRequestBody', 'MediaGraphTopologySetRequestBody': 'MediaGraphTopologySetRequestBody'} - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(OperationBase, self).__init__(**kwargs) - self.method_name = None # type: Optional[str] - - -class ItemNonSetRequestBase(OperationBase): - """ItemNonSetRequestBase. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphInstanceActivateRequest, MediaGraphInstanceDeActivateRequest, MediaGraphInstanceDeleteRequest, MediaGraphInstanceGetRequest, MediaGraphTopologyDeleteRequest, MediaGraphTopologyGetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. 
- :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - _subtype_map = { - 'method_name': {'GraphInstanceActivate': 'MediaGraphInstanceActivateRequest', 'GraphInstanceDeactivate': 'MediaGraphInstanceDeActivateRequest', 'GraphInstanceDelete': 'MediaGraphInstanceDeleteRequest', 'GraphInstanceGet': 'MediaGraphInstanceGetRequest', 'GraphTopologyDelete': 'MediaGraphTopologyDeleteRequest', 'GraphTopologyGet': 'MediaGraphTopologyGetRequest'} - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(ItemNonSetRequestBase, self).__init__(**kwargs) - self.method_name = 'ItemNonSetRequestBase' # type: str - self.name = kwargs['name'] - - -class MediaGraphSink(msrest.serialization.Model): - """Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphAssetSink, MediaGraphFileSink, MediaGraphIoTHubMessageSink. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphAssetSink': 'MediaGraphAssetSink', '#Microsoft.Media.MediaGraphFileSink': 'MediaGraphFileSink', '#Microsoft.Media.MediaGraphIoTHubMessageSink': 'MediaGraphIoTHubMessageSink'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphSink, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.name = kwargs['name'] - self.inputs = kwargs['inputs'] - - -class MediaGraphAssetSink(MediaGraphSink): - """Enables a graph to record media to an Azure Media Services asset, for subsequent playback. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param asset_name_pattern: A name pattern when creating new assets. - :type asset_name_pattern: str - :param segment_length: When writing media to an asset, wait until at least this duration of - media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum - of 30 seconds and a recommended maximum of 5 minutes. 
- :type segment_length: ~datetime.timedelta - :param local_media_cache_path: Path to a local file system directory for temporary caching of - media, before writing to an Asset. Used when the Edge device is temporarily disconnected from - Azure. - :type local_media_cache_path: str - :param local_media_cache_maximum_size_mi_b: Maximum amount of disk space that can be used for - temporary caching of media. - :type local_media_cache_maximum_size_mi_b: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'asset_name_pattern': {'key': 'assetNamePattern', 'type': 'str'}, - 'segment_length': {'key': 'segmentLength', 'type': 'duration'}, - 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'}, - 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphAssetSink, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphAssetSink' # type: str - self.asset_name_pattern = kwargs.get('asset_name_pattern', None) - self.segment_length = kwargs.get('segment_length', None) - self.local_media_cache_path = kwargs.get('local_media_cache_path', None) - self.local_media_cache_maximum_size_mi_b = kwargs.get('local_media_cache_maximum_size_mi_b', None) - - -class MediaGraphCertificateSource(msrest.serialization.Model): - """Base class for certificate sources. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphPemCertificateList. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphPemCertificateList': 'MediaGraphPemCertificateList'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphCertificateSource, self).__init__(**kwargs) - self.type = None # type: Optional[str] - - -class MediaGraphProcessor(msrest.serialization.Model): - """A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphFrameRateFilterProcessor, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. 
- :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphFrameRateFilterProcessor': 'MediaGraphFrameRateFilterProcessor', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphProcessor, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.name = kwargs['name'] - self.inputs = kwargs['inputs'] - - -class MediaGraphExtensionProcessorBase(MediaGraphProcessor): - """Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphCognitiveServicesVisionExtension, MediaGraphGrpcExtension, MediaGraphHttpExtension. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension': 'MediaGraphCognitiveServicesVisionExtension', '#Microsoft.Media.MediaGraphGrpcExtension': 'MediaGraphGrpcExtension', '#Microsoft.Media.MediaGraphHttpExtension': 'MediaGraphHttpExtension'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphExtensionProcessorBase, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphExtensionProcessorBase' # type: str - self.endpoint = kwargs.get('endpoint', None) - self.image = kwargs.get('image', None) - - -class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBase): - """A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. Inference results are relayed to downstream nodes. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. 
The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphCognitiveServicesVisionExtension, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension' # type: str - - -class MediaGraphCredentials(msrest.serialization.Model): - """Credentials to present during authentication. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphHttpHeaderCredentials, MediaGraphUsernamePasswordCredentials. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphHttpHeaderCredentials': 'MediaGraphHttpHeaderCredentials', '#Microsoft.Media.MediaGraphUsernamePasswordCredentials': 'MediaGraphUsernamePasswordCredentials'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphCredentials, self).__init__(**kwargs) - self.type = None # type: Optional[str] - - -class MediaGraphEndpoint(msrest.serialization.Model): - """Base class for endpoints. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphTlsEndpoint, MediaGraphUnsecuredEndpoint. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials - :param url: Required. Url for the endpoint. 
- :type url: str - """ - - _validation = { - 'type': {'required': True}, - 'url': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, - 'url': {'key': 'url', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphTlsEndpoint': 'MediaGraphTlsEndpoint', '#Microsoft.Media.MediaGraphUnsecuredEndpoint': 'MediaGraphUnsecuredEndpoint'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphEndpoint, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.credentials = kwargs.get('credentials', None) - self.url = kwargs['url'] - - -class MediaGraphFileSink(MediaGraphSink): - """Enables a media graph to write/store media (video and audio) to a file on the Edge device. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param file_path_pattern: Required. Absolute file path pattern for creating new files on the - Edge device. - :type file_path_pattern: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - 'file_path_pattern': {'required': True, 'min_length': 1}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'file_path_pattern': {'key': 'filePathPattern', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphFileSink, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphFileSink' # type: str - self.file_path_pattern = kwargs['file_path_pattern'] - - -class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): - """Limits the frame rate on the input video stream based on the maximumFps property. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not - exceed this limit. 
- :type maximum_fps: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'maximum_fps': {'key': 'maximumFps', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphFrameRateFilterProcessor, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphFrameRateFilterProcessor' # type: str - self.maximum_fps = kwargs.get('maximum_fps', None) - - -class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): - """A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage - :param data_transfer: Required. How media should be transferred to the inferencing engine. - :type data_transfer: ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransfer - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - 'data_transfer': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - 'data_transfer': {'key': 'dataTransfer', 'type': 'MediaGraphGrpcExtensionDataTransfer'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphGrpcExtension, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphGrpcExtension' # type: str - self.data_transfer = kwargs['data_transfer'] - - -class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): - """Describes how media should be transferred to the inferencing engine. - - All required parameters must be populated in order to send to Azure. - - :param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if - mode is SharedMemory. Should not be specificed otherwise. - :type shared_memory_size_mi_b: str - :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible - values include: "Embedded", "SharedMemory". 
- :type mode: str or ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransferMode - """ - - _validation = { - 'mode': {'required': True}, - } - - _attribute_map = { - 'shared_memory_size_mi_b': {'key': 'sharedMemorySizeMiB', 'type': 'str'}, - 'mode': {'key': 'mode', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphGrpcExtensionDataTransfer, self).__init__(**kwargs) - self.shared_memory_size_mi_b = kwargs.get('shared_memory_size_mi_b', None) - self.mode = kwargs['mode'] - - -class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): - """A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphHttpExtension, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphHttpExtension' # type: str - - -class MediaGraphHttpHeaderCredentials(MediaGraphCredentials): - """Http header service credentials. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param header_name: Required. HTTP header name. - :type header_name: str - :param header_value: Required. HTTP header value. - :type header_value: str - """ - - _validation = { - 'type': {'required': True}, - 'header_name': {'required': True}, - 'header_value': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'header_name': {'key': 'headerName', 'type': 'str'}, - 'header_value': {'key': 'headerValue', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphHttpHeaderCredentials, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphHttpHeaderCredentials' # type: str - self.header_name = kwargs['header_name'] - self.header_value = kwargs['header_value'] - - -class MediaGraphImage(msrest.serialization.Model): - """Describes the properties of an image frame. - - :param scale: The scaling mode for the image. - :type scale: ~azure.media.lva.edge.models.MediaGraphImageScale - :param format: Encoding settings for an image. 
- :type format: ~azure.media.lva.edge.models.MediaGraphImageFormat - """ - - _attribute_map = { - 'scale': {'key': 'scale', 'type': 'MediaGraphImageScale'}, - 'format': {'key': 'format', 'type': 'MediaGraphImageFormat'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphImage, self).__init__(**kwargs) - self.scale = kwargs.get('scale', None) - self.format = kwargs.get('format', None) - - -class MediaGraphImageFormat(msrest.serialization.Model): - """Encoding settings for an image. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphImageFormatEncoded, MediaGraphImageFormatRaw. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphImageFormatEncoded': 'MediaGraphImageFormatEncoded', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphImageFormat, self).__init__(**kwargs) - self.type = None # type: Optional[str] - - -class MediaGraphImageFormatEncoded(MediaGraphImageFormat): - """Allowed formats for the image. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param encoding: The different encoding formats that can be used for the image. Possible values - include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". - :type encoding: str or ~azure.media.lva.edge.models.MediaGraphImageEncodingFormat - :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best - quality). - :type quality: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'encoding': {'key': 'encoding', 'type': 'str'}, - 'quality': {'key': 'quality', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphImageFormatEncoded, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphImageFormatEncoded' # type: str - self.encoding = kwargs.get('encoding', "Jpeg") - self.quality = kwargs.get('quality', None) - - -class MediaGraphImageFormatRaw(MediaGraphImageFormat): - """Encoding settings for raw images. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", - "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". - :type pixel_format: str or ~azure.media.lva.edge.models.MediaGraphImageFormatRawPixelFormat - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'pixel_format': {'key': 'pixelFormat', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphImageFormatRaw, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphImageFormatRaw' # type: str - self.pixel_format = kwargs.get('pixel_format', None) - - -class MediaGraphImageScale(msrest.serialization.Model): - """The scaling mode for the image. 
- - :param mode: Describes the modes for scaling an input video frame into an image, before it is - sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". - :type mode: str or ~azure.media.lva.edge.models.MediaGraphImageScaleMode - :param width: The desired output width of the image. - :type width: str - :param height: The desired output height of the image. - :type height: str - """ - - _attribute_map = { - 'mode': {'key': 'mode', 'type': 'str'}, - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphImageScale, self).__init__(**kwargs) - self.mode = kwargs.get('mode', None) - self.width = kwargs.get('width', None) - self.height = kwargs.get('height', None) - - -class MediaGraphInstance(msrest.serialization.Model): - """Represents a Media Graph instance. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. name. - :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Properties of a Media Graph instance. - :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstance, self).__init__(**kwargs) - self.name = kwargs['name'] - self.system_data = kwargs.get('system_data', None) - self.properties = kwargs.get('properties', None) - - -class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): - """MediaGraphInstanceActivateRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceActivateRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceActivate' # type: str - - -class MediaGraphInstanceCollection(msrest.serialization.Model): - """Collection of graph instances. - - :param value: Collection of graph instances. - :type value: list[~azure.media.lva.edge.models.MediaGraphInstance] - :param continuation_token: Continuation token to use in subsequent calls to enumerate through - the graph instance collection (when the collection contains too many results to return in one - response). 
- :type continuation_token: str - """ - - _attribute_map = { - 'value': {'key': 'value', 'type': '[MediaGraphInstance]'}, - 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceCollection, self).__init__(**kwargs) - self.value = kwargs.get('value', None) - self.continuation_token = kwargs.get('continuation_token', None) - - -class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): - """MediaGraphInstanceDeActivateRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceDeActivateRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceDeactivate' # type: str - - -class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): - """MediaGraphInstanceDeleteRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceDeleteRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceDelete' # type: str - - -class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): - """MediaGraphInstanceGetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceGetRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceGet' # type: str - - -class MediaGraphInstanceListRequest(OperationBase): - """MediaGraphInstanceListRequest. 
- - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceListRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceList' # type: str - - -class MediaGraphInstanceProperties(msrest.serialization.Model): - """Properties of a Media Graph instance. - - :param description: An optional description for the instance. - :type description: str - :param topology_name: The name of the graph topology that this instance will run. A topology - with this name should already have been set in the Edge module. - :type topology_name: str - :param parameters: List of one or more graph instance parameters. - :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDefinition] - :param state: Allowed states for a graph Instance. Possible values include: "Inactive", - "Activating", "Active", "Deactivating". - :type state: str or ~azure.media.lva.edge.models.MediaGraphInstanceState - """ - - _attribute_map = { - 'description': {'key': 'description', 'type': 'str'}, - 'topology_name': {'key': 'topologyName', 'type': 'str'}, - 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDefinition]'}, - 'state': {'key': 'state', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceProperties, self).__init__(**kwargs) - self.description = kwargs.get('description', None) - self.topology_name = kwargs.get('topology_name', None) - self.parameters = kwargs.get('parameters', None) - self.state = kwargs.get('state', None) - - -class MediaGraphInstanceSetRequest(OperationBase): - """MediaGraphInstanceSetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param instance: Required. Represents a Media Graph instance. - :type instance: ~azure.media.lva.edge.models.MediaGraphInstance - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'instance': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'instance': {'key': 'instance', 'type': 'MediaGraphInstance'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceSetRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceSet' # type: str - self.instance = kwargs['instance'] - - -class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): - """MediaGraphInstanceSetRequestBody. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. 
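# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated code): building the
# graph-instance lifecycle requests defined above. serialize() on an msrest
# model returns a JSON-ready dict; how that payload is delivered to the Live
# Video Analytics edge module (for example as an IoT Hub direct-method
# payload) is outside the scope of these models and is only assumed here.
# --------------------------------------------------------------------------
from azure.media.lva.edge._generated.models import (
    MediaGraphInstanceActivateRequest,
    MediaGraphInstanceDeActivateRequest,
    MediaGraphInstanceListRequest,
)

# Enumerate all instances, then activate and later deactivate one of them.
list_request = MediaGraphInstanceListRequest()
activate_request = MediaGraphInstanceActivateRequest(name="camera-1-instance")
deactivate_request = MediaGraphInstanceDeActivateRequest(name="camera-1-instance")

payloads = [req.serialize() for req in (list_request, activate_request, deactivate_request)]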
Default value: "1.0". - :vartype api_version: str - :param name: Required. name. - :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Properties of a Media Graph instance. - :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceSetRequestBody, self).__init__(**kwargs) - self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str - self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str - self.name = kwargs['name'] - self.system_data = kwargs.get('system_data', None) - self.properties = kwargs.get('properties', None) - - -class MediaGraphIoTHubMessageSink(MediaGraphSink): - """Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param hub_output_name: Name of the output path to which the graph will publish message. These - messages can then be delivered to desired destinations by declaring routes referencing the - output path in the IoT Edge deployment manifest. - :type hub_output_name: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'hub_output_name': {'key': 'hubOutputName', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphIoTHubMessageSink, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSink' # type: str - self.hub_output_name = kwargs.get('hub_output_name', None) - - -class MediaGraphSource(msrest.serialization.Model): - """Media graph source. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphIoTHubMessageSource, MediaGraphRtspSource. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The type of the source node. The discriminator for derived - types.Constant filled by server. - :type type: str - :param name: Required. The name to be used for this source node. 
- :type name: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphIoTHubMessageSource': 'MediaGraphIoTHubMessageSource', '#Microsoft.Media.MediaGraphRtspSource': 'MediaGraphRtspSource'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphSource, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.name = kwargs['name'] - - -class MediaGraphIoTHubMessageSource(MediaGraphSource): - """Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The type of the source node. The discriminator for derived - types.Constant filled by server. - :type type: str - :param name: Required. The name to be used for this source node. - :type name: str - :param hub_input_name: Name of the input path where messages can be routed to (via routes - declared in the IoT Edge deployment manifest). - :type hub_input_name: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'hub_input_name': {'key': 'hubInputName', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphIoTHubMessageSource, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSource' # type: str - self.hub_input_name = kwargs.get('hub_input_name', None) - - -class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): - """A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param sensitivity: Enumeration that specifies the sensitivity of the motion detection - processor. Possible values include: "Low", "Medium", "High". - :type sensitivity: str or ~azure.media.lva.edge.models.MediaGraphMotionDetectionSensitivity - :param output_motion_region: Indicates whether the processor should detect and output the - regions, within the video frame, where motion was detected. Default is true. 
- :type output_motion_region: bool - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'sensitivity': {'key': 'sensitivity', 'type': 'str'}, - 'output_motion_region': {'key': 'outputMotionRegion', 'type': 'bool'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphMotionDetectionProcessor, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphMotionDetectionProcessor' # type: str - self.sensitivity = kwargs.get('sensitivity', None) - self.output_motion_region = kwargs.get('output_motion_region', None) - - -class MediaGraphNodeInput(msrest.serialization.Model): - """Represents the input to any node in a media graph. - - :param node_name: The name of another node in the media graph, the output of which is used as - input to this node. - :type node_name: str - :param output_selectors: Allows for the selection of particular streams from another node. - :type output_selectors: list[~azure.media.lva.edge.models.MediaGraphOutputSelector] - """ - - _attribute_map = { - 'node_name': {'key': 'nodeName', 'type': 'str'}, - 'output_selectors': {'key': 'outputSelectors', 'type': '[MediaGraphOutputSelector]'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphNodeInput, self).__init__(**kwargs) - self.node_name = kwargs.get('node_name', None) - self.output_selectors = kwargs.get('output_selectors', None) - - -class MediaGraphOutputSelector(msrest.serialization.Model): - """Allows for the selection of particular streams from another node. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar property: The stream property to compare with. Default value: "mediaType". - :vartype property: str - :param operator: The operator to compare streams by. Possible values include: "is", "isNot". - :type operator: str or ~azure.media.lva.edge.models.MediaGraphOutputSelectorOperator - :param value: Value to compare against. - :type value: str - """ - - _validation = { - 'property': {'constant': True}, - } - - _attribute_map = { - 'property': {'key': 'property', 'type': 'str'}, - 'operator': {'key': 'operator', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } - - property = "mediaType" - - def __init__( - self, - **kwargs - ): - super(MediaGraphOutputSelector, self).__init__(**kwargs) - self.operator = kwargs.get('operator', None) - self.value = kwargs.get('value', None) - - -class MediaGraphParameterDeclaration(msrest.serialization.Model): - """The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the parameter. - :type name: str - :param type: Required. name. Possible values include: "String", "SecretString", "Int", - "Double", "Bool". - :type type: str or ~azure.media.lva.edge.models.MediaGraphParameterType - :param description: Description of the parameter. - :type description: str - :param default: The default value for the parameter, to be used if the graph instance does not - specify a value. 
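# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated code): wiring one node
# to another with MediaGraphNodeInput, and using MediaGraphOutputSelector to
# forward only the video stream (the compared property is the constant
# "mediaType"). The upstream node name is a placeholder.
# --------------------------------------------------------------------------
from azure.media.lva.edge._generated.models import (
    MediaGraphNodeInput,
    MediaGraphOutputSelector,
)

# Consume the output of the node named "rtspSource", but only those streams
# whose mediaType "is" video.
video_only_input = MediaGraphNodeInput(
    node_name="rtspSource",
    output_selectors=[MediaGraphOutputSelector(operator="is", value="video")],
)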
- :type default: str - """ - - _validation = { - 'name': {'required': True, 'max_length': 64, 'min_length': 0}, - 'type': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'default': {'key': 'default', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphParameterDeclaration, self).__init__(**kwargs) - self.name = kwargs['name'] - self.type = kwargs['type'] - self.description = kwargs.get('description', None) - self.default = kwargs.get('default', None) - - -class MediaGraphParameterDefinition(msrest.serialization.Model): - """A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. Name of parameter as defined in the graph topology. - :type name: str - :param value: Required. Value of parameter. - :type value: str - """ - - _validation = { - 'name': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphParameterDefinition, self).__init__(**kwargs) - self.name = kwargs['name'] - self.value = kwargs['value'] - - -class MediaGraphPemCertificateList(MediaGraphCertificateSource): - """A list of PEM formatted certificates. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param certificates: Required. PEM formatted public certificates one per entry. - :type certificates: list[str] - """ - - _validation = { - 'type': {'required': True}, - 'certificates': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'certificates': {'key': 'certificates', 'type': '[str]'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphPemCertificateList, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphPemCertificateList' # type: str - self.certificates = kwargs['certificates'] - - -class MediaGraphRtspSource(MediaGraphSource): - """Enables a graph to capture media from a RTSP server. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The type of the source node. The discriminator for derived - types.Constant filled by server. - :type type: str - :param name: Required. The name to be used for this source node. - :type name: str - :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. - Possible values include: "Http", "Tcp". - :type transport: str or ~azure.media.lva.edge.models.MediaGraphRtspTransport - :param endpoint: Required. RTSP endpoint of the stream that is being connected to. 
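# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated code): the
# declaration/definition split described above. A topology declares a
# parameter (MediaGraphParameterDeclaration); each instance then supplies a
# concrete value for it (MediaGraphParameterDefinition). Names, URLs and the
# topology itself are placeholders, and the topology is assumed to have been
# set on the module already.
# --------------------------------------------------------------------------
from azure.media.lva.edge._generated.models import (
    MediaGraphInstance,
    MediaGraphInstanceProperties,
    MediaGraphInstanceSetRequest,
    MediaGraphParameterDeclaration,
    MediaGraphParameterDefinition,
)

# Declared once in the topology: an RTSP URL parameter with a default value.
rtsp_url_declaration = MediaGraphParameterDeclaration(
    name="rtspUrl",
    type="String",
    description="RTSP URL of the camera",
    default="rtsp://camera.example/stream",
)

# Supplied per instance: the concrete value for that parameter.
instance = MediaGraphInstance(
    name="camera-1-instance",
    properties=MediaGraphInstanceProperties(
        topology_name="MotionDetection",
        parameters=[
            MediaGraphParameterDefinition(name="rtspUrl", value="rtsp://10.0.0.5/live"),
        ],
    ),
)
set_request = MediaGraphInstanceSetRequest(instance=instance)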
- :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'endpoint': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'transport': {'key': 'transport', 'type': 'str'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphRtspSource, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphRtspSource' # type: str - self.transport = kwargs.get('transport', None) - self.endpoint = kwargs['endpoint'] - - -class MediaGraphSignalGateProcessor(MediaGraphProcessor): - """A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param activation_evaluation_window: The period of time over which the gate gathers input - events, before evaluating them. - :type activation_evaluation_window: str - :param activation_signal_offset: Signal offset once the gate is activated (can be negative). It - is an offset between the time the event is received, and the timestamp of the first media - sample (eg. video frame) that is allowed through by the gate. - :type activation_signal_offset: str - :param minimum_activation_time: The minimum period for which the gate remains open, in the - absence of subsequent triggers (events). - :type minimum_activation_time: str - :param maximum_activation_time: The maximum period for which the gate remains open, in the - presence of subsequent events. - :type maximum_activation_time: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'activation_evaluation_window': {'key': 'activationEvaluationWindow', 'type': 'str'}, - 'activation_signal_offset': {'key': 'activationSignalOffset', 'type': 'str'}, - 'minimum_activation_time': {'key': 'minimumActivationTime', 'type': 'str'}, - 'maximum_activation_time': {'key': 'maximumActivationTime', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphSignalGateProcessor, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphSignalGateProcessor' # type: str - self.activation_evaluation_window = kwargs.get('activation_evaluation_window', None) - self.activation_signal_offset = kwargs.get('activation_signal_offset', None) - self.minimum_activation_time = kwargs.get('minimum_activation_time', None) - self.maximum_activation_time = kwargs.get('maximum_activation_time', None) - - -class MediaGraphSystemData(msrest.serialization.Model): - """Graph system data. - - :param created_at: The timestamp of resource creation (UTC). 
- :type created_at: ~datetime.datetime - :param last_modified_at: The timestamp of resource last modification (UTC). - :type last_modified_at: ~datetime.datetime - """ - - _attribute_map = { - 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, - 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphSystemData, self).__init__(**kwargs) - self.created_at = kwargs.get('created_at', None) - self.last_modified_at = kwargs.get('last_modified_at', None) - - -class MediaGraphTlsEndpoint(MediaGraphEndpoint): - """An endpoint that the graph can connect to, which must be connected over TLS/SSL. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials - :param url: Required. Url for the endpoint. - :type url: str - :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null - designates that Azure Media Service's source of trust should be used. - :type trusted_certificates: ~azure.media.lva.edge.models.MediaGraphCertificateSource - :param validation_options: Validation options to use when authenticating a TLS connection. By - default, strict validation is used. - :type validation_options: ~azure.media.lva.edge.models.MediaGraphTlsValidationOptions - """ - - _validation = { - 'type': {'required': True}, - 'url': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, - 'url': {'key': 'url', 'type': 'str'}, - 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'MediaGraphCertificateSource'}, - 'validation_options': {'key': 'validationOptions', 'type': 'MediaGraphTlsValidationOptions'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphTlsEndpoint, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphTlsEndpoint' # type: str - self.trusted_certificates = kwargs.get('trusted_certificates', None) - self.validation_options = kwargs.get('validation_options', None) - - -class MediaGraphTlsValidationOptions(msrest.serialization.Model): - """Options for controlling the authentication of TLS endpoints. - - :param ignore_hostname: Boolean value ignoring the host name (common name) during validation. - :type ignore_hostname: str - :param ignore_signature: Boolean value ignoring the integrity of the certificate chain at the - current time. - :type ignore_signature: str - """ - - _attribute_map = { - 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, - 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphTlsValidationOptions, self).__init__(**kwargs) - self.ignore_hostname = kwargs.get('ignore_hostname', None) - self.ignore_signature = kwargs.get('ignore_signature', None) - - -class MediaGraphTopology(msrest.serialization.Model): - """Describes a graph topology. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. name. - :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Describes the properties of a graph topology. 
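# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated code): a TLS endpoint
# that trusts an explicit PEM certificate instead of the default trust store,
# with hostname validation relaxed. The URL and certificate body are
# placeholders; note that the generated model types the validation flags as
# strings.
# --------------------------------------------------------------------------
from azure.media.lva.edge._generated.models import (
    MediaGraphPemCertificateList,
    MediaGraphTlsEndpoint,
    MediaGraphTlsValidationOptions,
)

tls_endpoint = MediaGraphTlsEndpoint(
    url="rtsps://camera.contoso.internal:322/stream",
    trusted_certificates=MediaGraphPemCertificateList(
        certificates=["-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----"],
    ),
    validation_options=MediaGraphTlsValidationOptions(
        ignore_hostname="true",
        ignore_signature="false",
    ),
)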
- :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopology, self).__init__(**kwargs) - self.name = kwargs['name'] - self.system_data = kwargs.get('system_data', None) - self.properties = kwargs.get('properties', None) - - -class MediaGraphTopologyCollection(msrest.serialization.Model): - """Collection of graph topologies. - - :param value: Collection of graph topologies. - :type value: list[~azure.media.lva.edge.models.MediaGraphTopology] - :param continuation_token: Continuation token to use in subsequent calls to enumerate through - the graph topologies collection (when the collection contains too many results to return in one - response). - :type continuation_token: str - """ - - _attribute_map = { - 'value': {'key': 'value', 'type': '[MediaGraphTopology]'}, - 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologyCollection, self).__init__(**kwargs) - self.value = kwargs.get('value', None) - self.continuation_token = kwargs.get('continuation_token', None) - - -class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): - """MediaGraphTopologyDeleteRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologyDeleteRequest, self).__init__(**kwargs) - self.method_name = 'GraphTopologyDelete' # type: str - - -class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): - """MediaGraphTopologyGetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologyGetRequest, self).__init__(**kwargs) - self.method_name = 'GraphTopologyGet' # type: str - - -class MediaGraphTopologyListRequest(OperationBase): - """MediaGraphTopologyListRequest. 
- - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologyListRequest, self).__init__(**kwargs) - self.method_name = 'GraphTopologyList' # type: str - - -class MediaGraphTopologyProperties(msrest.serialization.Model): - """Describes the properties of a graph topology. - - :param description: An optional description for the instance. - :type description: str - :param parameters: An optional description for the instance. - :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDeclaration] - :param sources: An optional description for the instance. - :type sources: list[~azure.media.lva.edge.models.MediaGraphSource] - :param processors: An optional description for the instance. - :type processors: list[~azure.media.lva.edge.models.MediaGraphProcessor] - :param sinks: name. - :type sinks: list[~azure.media.lva.edge.models.MediaGraphSink] - """ - - _attribute_map = { - 'description': {'key': 'description', 'type': 'str'}, - 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDeclaration]'}, - 'sources': {'key': 'sources', 'type': '[MediaGraphSource]'}, - 'processors': {'key': 'processors', 'type': '[MediaGraphProcessor]'}, - 'sinks': {'key': 'sinks', 'type': '[MediaGraphSink]'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologyProperties, self).__init__(**kwargs) - self.description = kwargs.get('description', None) - self.parameters = kwargs.get('parameters', None) - self.sources = kwargs.get('sources', None) - self.processors = kwargs.get('processors', None) - self.sinks = kwargs.get('sinks', None) - - -class MediaGraphTopologySetRequest(OperationBase): - """MediaGraphTopologySetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param graph: Required. Describes a graph topology. - :type graph: ~azure.media.lva.edge.models.MediaGraphTopology - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'graph': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'graph': {'key': 'graph', 'type': 'MediaGraphTopology'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologySetRequest, self).__init__(**kwargs) - self.method_name = 'GraphTopologySet' # type: str - self.graph = kwargs['graph'] - - -class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): - """MediaGraphTopologySetRequestBody. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. 
- :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. name. - :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Describes the properties of a graph topology. - :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologySetRequestBody, self).__init__(**kwargs) - self.method_name = 'MediaGraphTopologySetRequestBody' # type: str - self.method_name = 'MediaGraphTopologySetRequestBody' # type: str - self.name = kwargs['name'] - self.system_data = kwargs.get('system_data', None) - self.properties = kwargs.get('properties', None) - - -class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint): - """An endpoint that the media graph can connect to, with no encryption in transit. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials - :param url: Required. Url for the endpoint. - :type url: str - """ - - _validation = { - 'type': {'required': True}, - 'url': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, - 'url': {'key': 'url', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphUnsecuredEndpoint, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphUnsecuredEndpoint' # type: str - - -class MediaGraphUsernamePasswordCredentials(MediaGraphCredentials): - """Username/password credential pair. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param username: Required. Username for a username/password pair. - :type username: str - :param password: Password for a username/password pair. 
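# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated code): assembling a
# minimal topology from the models above - an RTSP source feeding a motion
# detection processor whose events are published through an IoT Hub message
# sink - and wrapping it in a GraphTopologySet request. Node names, the RTSP
# URL and the hub output name are placeholders.
# --------------------------------------------------------------------------
from azure.media.lva.edge._generated.models import (
    MediaGraphIoTHubMessageSink,
    MediaGraphMotionDetectionProcessor,
    MediaGraphNodeInput,
    MediaGraphRtspSource,
    MediaGraphTopology,
    MediaGraphTopologyProperties,
    MediaGraphTopologySetRequest,
    MediaGraphUnsecuredEndpoint,
)

rtsp_source = MediaGraphRtspSource(
    name="rtspSource",
    endpoint=MediaGraphUnsecuredEndpoint(url="rtsp://10.0.0.5/live"),
)
motion_detection = MediaGraphMotionDetectionProcessor(
    name="motionDetection",
    inputs=[MediaGraphNodeInput(node_name="rtspSource")],
    sensitivity="Medium",
)
hub_sink = MediaGraphIoTHubMessageSink(
    name="hubSink",
    inputs=[MediaGraphNodeInput(node_name="motionDetection")],
    hub_output_name="inferenceOutput",
)

topology = MediaGraphTopology(
    name="MotionDetection",
    properties=MediaGraphTopologyProperties(
        description="Detect motion and publish events to IoT Hub",
        sources=[rtsp_source],
        processors=[motion_detection],
        sinks=[hub_sink],
    ),
)
set_request = MediaGraphTopologySetRequest(graph=topology)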
- :type password: str - """ - - _validation = { - 'type': {'required': True}, - 'username': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'username': {'key': 'username', 'type': 'str'}, - 'password': {'key': 'password', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphUsernamePasswordCredentials, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphUsernamePasswordCredentials' # type: str - self.username = kwargs['username'] - self.password = kwargs.get('password', None) diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models_py3.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models_py3.py deleted file mode 100644 index 5de3adde8e11..000000000000 --- a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/models/_models_py3.py +++ /dev/null @@ -1,2185 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import datetime -from typing import List, Optional, Union - -import msrest.serialization - -from ._definitionsfor_live_video_analyticson_io_tedge_enums import * - - -class OperationBase(msrest.serialization.Model): - """OperationBase. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphInstanceListRequest, MediaGraphInstanceSetRequest, MediaGraphTopologyListRequest, MediaGraphTopologySetRequest, ItemNonSetRequestBase, MediaGraphInstanceSetRequestBody, MediaGraphTopologySetRequestBody. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - } - - _subtype_map = { - 'method_name': {'GraphInstanceList': 'MediaGraphInstanceListRequest', 'GraphInstanceSet': 'MediaGraphInstanceSetRequest', 'GraphTopologyList': 'MediaGraphTopologyListRequest', 'GraphTopologySet': 'MediaGraphTopologySetRequest', 'ItemNonSetRequestBase': 'ItemNonSetRequestBase', 'MediaGraphInstanceSetRequestBody': 'MediaGraphInstanceSetRequestBody', 'MediaGraphTopologySetRequestBody': 'MediaGraphTopologySetRequestBody'} - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(OperationBase, self).__init__(**kwargs) - self.method_name = None # type: Optional[str] - - -class ItemNonSetRequestBase(OperationBase): - """ItemNonSetRequestBase. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphInstanceActivateRequest, MediaGraphInstanceDeActivateRequest, MediaGraphInstanceDeleteRequest, MediaGraphInstanceGetRequest, MediaGraphTopologyDeleteRequest, MediaGraphTopologyGetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. 
- - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - _subtype_map = { - 'method_name': {'GraphInstanceActivate': 'MediaGraphInstanceActivateRequest', 'GraphInstanceDeactivate': 'MediaGraphInstanceDeActivateRequest', 'GraphInstanceDelete': 'MediaGraphInstanceDeleteRequest', 'GraphInstanceGet': 'MediaGraphInstanceGetRequest', 'GraphTopologyDelete': 'MediaGraphTopologyDeleteRequest', 'GraphTopologyGet': 'MediaGraphTopologyGetRequest'} - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(ItemNonSetRequestBase, self).__init__(**kwargs) - self.method_name = 'ItemNonSetRequestBase' # type: str - self.name = name - - -class MediaGraphSink(msrest.serialization.Model): - """Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphAssetSink, MediaGraphFileSink, MediaGraphIoTHubMessageSink. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphAssetSink': 'MediaGraphAssetSink', '#Microsoft.Media.MediaGraphFileSink': 'MediaGraphFileSink', '#Microsoft.Media.MediaGraphIoTHubMessageSink': 'MediaGraphIoTHubMessageSink'} - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - **kwargs - ): - super(MediaGraphSink, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.name = name - self.inputs = inputs - - -class MediaGraphAssetSink(MediaGraphSink): - """Enables a graph to record media to an Azure Media Services asset, for subsequent playback. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param asset_name_pattern: A name pattern when creating new assets. 
- :type asset_name_pattern: str - :param segment_length: When writing media to an asset, wait until at least this duration of - media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum - of 30 seconds and a recommended maximum of 5 minutes. - :type segment_length: ~datetime.timedelta - :param local_media_cache_path: Path to a local file system directory for temporary caching of - media, before writing to an Asset. Used when the Edge device is temporarily disconnected from - Azure. - :type local_media_cache_path: str - :param local_media_cache_maximum_size_mi_b: Maximum amount of disk space that can be used for - temporary caching of media. - :type local_media_cache_maximum_size_mi_b: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'asset_name_pattern': {'key': 'assetNamePattern', 'type': 'str'}, - 'segment_length': {'key': 'segmentLength', 'type': 'duration'}, - 'local_media_cache_path': {'key': 'localMediaCachePath', 'type': 'str'}, - 'local_media_cache_maximum_size_mi_b': {'key': 'localMediaCacheMaximumSizeMiB', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - asset_name_pattern: Optional[str] = None, - segment_length: Optional[datetime.timedelta] = None, - local_media_cache_path: Optional[str] = None, - local_media_cache_maximum_size_mi_b: Optional[str] = None, - **kwargs - ): - super(MediaGraphAssetSink, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphAssetSink' # type: str - self.asset_name_pattern = asset_name_pattern - self.segment_length = segment_length - self.local_media_cache_path = local_media_cache_path - self.local_media_cache_maximum_size_mi_b = local_media_cache_maximum_size_mi_b - - -class MediaGraphCertificateSource(msrest.serialization.Model): - """Base class for certificate sources. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphPemCertificateList. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphPemCertificateList': 'MediaGraphPemCertificateList'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphCertificateSource, self).__init__(**kwargs) - self.type = None # type: Optional[str] - - -class MediaGraphProcessor(msrest.serialization.Model): - """A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphExtensionProcessorBase, MediaGraphFrameRateFilterProcessor, MediaGraphMotionDetectionProcessor, MediaGraphSignalGateProcessor. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. 
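# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated code): recording to an
# Azure Media Services asset with the keyword-only constructor shown above.
# The asset name, cache path, sizes and the upstream "signalGate" node name
# are placeholders.
# --------------------------------------------------------------------------
import datetime

from azure.media.lva.edge._generated.models import MediaGraphAssetSink, MediaGraphNodeInput

asset_sink = MediaGraphAssetSink(
    name="assetSink",
    inputs=[MediaGraphNodeInput(node_name="signalGate")],
    # Placeholder asset name pattern for the recordings.
    asset_name_pattern="sample-asset",
    # Accumulate 30 seconds of media on the edge before each write to the asset.
    segment_length=datetime.timedelta(seconds=30),
    # Placeholder local cache location and size used while disconnected from Azure.
    local_media_cache_path="/tmp/lva-cache",
    local_media_cache_maximum_size_mi_b="2048",
)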
An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphExtensionProcessorBase': 'MediaGraphExtensionProcessorBase', '#Microsoft.Media.MediaGraphFrameRateFilterProcessor': 'MediaGraphFrameRateFilterProcessor', '#Microsoft.Media.MediaGraphMotionDetectionProcessor': 'MediaGraphMotionDetectionProcessor', '#Microsoft.Media.MediaGraphSignalGateProcessor': 'MediaGraphSignalGateProcessor'} - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - **kwargs - ): - super(MediaGraphProcessor, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.name = name - self.inputs = inputs - - -class MediaGraphExtensionProcessorBase(MediaGraphProcessor): - """Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphCognitiveServicesVisionExtension, MediaGraphGrpcExtension, MediaGraphHttpExtension. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. 
- :type image: ~azure.media.lva.edge.models.MediaGraphImage - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension': 'MediaGraphCognitiveServicesVisionExtension', '#Microsoft.Media.MediaGraphGrpcExtension': 'MediaGraphGrpcExtension', '#Microsoft.Media.MediaGraphHttpExtension': 'MediaGraphHttpExtension'} - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - endpoint: Optional["MediaGraphEndpoint"] = None, - image: Optional["MediaGraphImage"] = None, - **kwargs - ): - super(MediaGraphExtensionProcessorBase, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphExtensionProcessorBase' # type: str - self.endpoint = endpoint - self.image = image - - -class MediaGraphCognitiveServicesVisionExtension(MediaGraphExtensionProcessorBase): - """A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. Inference results are relayed to downstream nodes. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - endpoint: Optional["MediaGraphEndpoint"] = None, - image: Optional["MediaGraphImage"] = None, - **kwargs - ): - super(MediaGraphCognitiveServicesVisionExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) - self.type = '#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension' # type: str - - -class MediaGraphCredentials(msrest.serialization.Model): - """Credentials to present during authentication. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphHttpHeaderCredentials, MediaGraphUsernamePasswordCredentials. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. 
- :type type: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphHttpHeaderCredentials': 'MediaGraphHttpHeaderCredentials', '#Microsoft.Media.MediaGraphUsernamePasswordCredentials': 'MediaGraphUsernamePasswordCredentials'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphCredentials, self).__init__(**kwargs) - self.type = None # type: Optional[str] - - -class MediaGraphEndpoint(msrest.serialization.Model): - """Base class for endpoints. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphTlsEndpoint, MediaGraphUnsecuredEndpoint. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials - :param url: Required. Url for the endpoint. - :type url: str - """ - - _validation = { - 'type': {'required': True}, - 'url': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, - 'url': {'key': 'url', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphTlsEndpoint': 'MediaGraphTlsEndpoint', '#Microsoft.Media.MediaGraphUnsecuredEndpoint': 'MediaGraphUnsecuredEndpoint'} - } - - def __init__( - self, - *, - url: str, - credentials: Optional["MediaGraphCredentials"] = None, - **kwargs - ): - super(MediaGraphEndpoint, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.credentials = credentials - self.url = url - - -class MediaGraphFileSink(MediaGraphSink): - """Enables a media graph to write/store media (video and audio) to a file on the Edge device. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param file_path_pattern: Required. Absolute file path pattern for creating new files on the - Edge device. - :type file_path_pattern: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - 'file_path_pattern': {'required': True, 'min_length': 1}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'file_path_pattern': {'key': 'filePathPattern', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - file_path_pattern: str, - **kwargs - ): - super(MediaGraphFileSink, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphFileSink' # type: str - self.file_path_pattern = file_path_pattern - - -class MediaGraphFrameRateFilterProcessor(MediaGraphProcessor): - """Limits the frame rate on the input video stream based on the maximumFps property. 
- - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param maximum_fps: Ensures that the frame rate of the video leaving this processor does not - exceed this limit. - :type maximum_fps: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'maximum_fps': {'key': 'maximumFps', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - maximum_fps: Optional[str] = None, - **kwargs - ): - super(MediaGraphFrameRateFilterProcessor, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphFrameRateFilterProcessor' # type: str - self.maximum_fps = maximum_fps - - -class MediaGraphGrpcExtension(MediaGraphExtensionProcessorBase): - """A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. Inference results are relayed to downstream nodes. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. - :type image: ~azure.media.lva.edge.models.MediaGraphImage - :param data_transfer: Required. How media should be transferred to the inferencing engine. 
- :type data_transfer: ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransfer - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - 'data_transfer': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - 'data_transfer': {'key': 'dataTransfer', 'type': 'MediaGraphGrpcExtensionDataTransfer'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - data_transfer: "MediaGraphGrpcExtensionDataTransfer", - endpoint: Optional["MediaGraphEndpoint"] = None, - image: Optional["MediaGraphImage"] = None, - **kwargs - ): - super(MediaGraphGrpcExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) - self.type = '#Microsoft.Media.MediaGraphGrpcExtension' # type: str - self.data_transfer = data_transfer - - -class MediaGraphGrpcExtensionDataTransfer(msrest.serialization.Model): - """Describes how media should be transferred to the inferencing engine. - - All required parameters must be populated in order to send to Azure. - - :param shared_memory_size_mi_b: The size of the buffer for all in-flight frames in mebibytes if - mode is SharedMemory. Should not be specificed otherwise. - :type shared_memory_size_mi_b: str - :param mode: Required. How frame data should be transmitted to the inferencing engine. Possible - values include: "Embedded", "SharedMemory". - :type mode: str or ~azure.media.lva.edge.models.MediaGraphGrpcExtensionDataTransferMode - """ - - _validation = { - 'mode': {'required': True}, - } - - _attribute_map = { - 'shared_memory_size_mi_b': {'key': 'sharedMemorySizeMiB', 'type': 'str'}, - 'mode': {'key': 'mode', 'type': 'str'}, - } - - def __init__( - self, - *, - mode: Union[str, "MediaGraphGrpcExtensionDataTransferMode"], - shared_memory_size_mi_b: Optional[str] = None, - **kwargs - ): - super(MediaGraphGrpcExtensionDataTransfer, self).__init__(**kwargs) - self.shared_memory_size_mi_b = shared_memory_size_mi_b - self.mode = mode - - -class MediaGraphHttpExtension(MediaGraphExtensionProcessorBase): - """A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param endpoint: Endpoint to which this processor should connect. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - :param image: Describes the parameters of the image that is sent as input to the endpoint. 
- :type image: ~azure.media.lva.edge.models.MediaGraphImage - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - 'image': {'key': 'image', 'type': 'MediaGraphImage'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - endpoint: Optional["MediaGraphEndpoint"] = None, - image: Optional["MediaGraphImage"] = None, - **kwargs - ): - super(MediaGraphHttpExtension, self).__init__(name=name, inputs=inputs, endpoint=endpoint, image=image, **kwargs) - self.type = '#Microsoft.Media.MediaGraphHttpExtension' # type: str - - -class MediaGraphHttpHeaderCredentials(MediaGraphCredentials): - """Http header service credentials. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param header_name: Required. HTTP header name. - :type header_name: str - :param header_value: Required. HTTP header value. - :type header_value: str - """ - - _validation = { - 'type': {'required': True}, - 'header_name': {'required': True}, - 'header_value': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'header_name': {'key': 'headerName', 'type': 'str'}, - 'header_value': {'key': 'headerValue', 'type': 'str'}, - } - - def __init__( - self, - *, - header_name: str, - header_value: str, - **kwargs - ): - super(MediaGraphHttpHeaderCredentials, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphHttpHeaderCredentials' # type: str - self.header_name = header_name - self.header_value = header_value - - -class MediaGraphImage(msrest.serialization.Model): - """Describes the properties of an image frame. - - :param scale: The scaling mode for the image. - :type scale: ~azure.media.lva.edge.models.MediaGraphImageScale - :param format: Encoding settings for an image. - :type format: ~azure.media.lva.edge.models.MediaGraphImageFormat - """ - - _attribute_map = { - 'scale': {'key': 'scale', 'type': 'MediaGraphImageScale'}, - 'format': {'key': 'format', 'type': 'MediaGraphImageFormat'}, - } - - def __init__( - self, - *, - scale: Optional["MediaGraphImageScale"] = None, - format: Optional["MediaGraphImageFormat"] = None, - **kwargs - ): - super(MediaGraphImage, self).__init__(**kwargs) - self.scale = scale - self.format = format - - -class MediaGraphImageFormat(msrest.serialization.Model): - """Encoding settings for an image. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphImageFormatEncoded, MediaGraphImageFormatRaw. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. 
- :type type: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphImageFormatEncoded': 'MediaGraphImageFormatEncoded', '#Microsoft.Media.MediaGraphImageFormatRaw': 'MediaGraphImageFormatRaw'} - } - - def __init__( - self, - **kwargs - ): - super(MediaGraphImageFormat, self).__init__(**kwargs) - self.type = None # type: Optional[str] - - -class MediaGraphImageFormatEncoded(MediaGraphImageFormat): - """Allowed formats for the image. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param encoding: The different encoding formats that can be used for the image. Possible values - include: "Jpeg", "Bmp", "Png". Default value: "Jpeg". - :type encoding: str or ~azure.media.lva.edge.models.MediaGraphImageEncodingFormat - :param quality: The image quality (used for JPEG only). Value must be between 0 to 100 (best - quality). - :type quality: str - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'encoding': {'key': 'encoding', 'type': 'str'}, - 'quality': {'key': 'quality', 'type': 'str'}, - } - - def __init__( - self, - *, - encoding: Optional[Union[str, "MediaGraphImageEncodingFormat"]] = "Jpeg", - quality: Optional[str] = None, - **kwargs - ): - super(MediaGraphImageFormatEncoded, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphImageFormatEncoded' # type: str - self.encoding = encoding - self.quality = quality - - -class MediaGraphImageFormatRaw(MediaGraphImageFormat): - """Encoding settings for raw images. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param pixel_format: pixel format. Possible values include: "Yuv420p", "Rgb565be", "Rgb565le", - "Rgb555be", "Rgb555le", "Rgb24", "Bgr24", "Argb", "Rgba", "Abgr", "Bgra". - :type pixel_format: str or ~azure.media.lva.edge.models.MediaGraphImageFormatRawPixelFormat - """ - - _validation = { - 'type': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'pixel_format': {'key': 'pixelFormat', 'type': 'str'}, - } - - def __init__( - self, - *, - pixel_format: Optional[Union[str, "MediaGraphImageFormatRawPixelFormat"]] = None, - **kwargs - ): - super(MediaGraphImageFormatRaw, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphImageFormatRaw' # type: str - self.pixel_format = pixel_format - - -class MediaGraphImageScale(msrest.serialization.Model): - """The scaling mode for the image. - - :param mode: Describes the modes for scaling an input video frame into an image, before it is - sent to an inference engine. Possible values include: "PreserveAspectRatio", "Pad", "Stretch". - :type mode: str or ~azure.media.lva.edge.models.MediaGraphImageScaleMode - :param width: The desired output width of the image. - :type width: str - :param height: The desired output height of the image. 
- :type height: str - """ - - _attribute_map = { - 'mode': {'key': 'mode', 'type': 'str'}, - 'width': {'key': 'width', 'type': 'str'}, - 'height': {'key': 'height', 'type': 'str'}, - } - - def __init__( - self, - *, - mode: Optional[Union[str, "MediaGraphImageScaleMode"]] = None, - width: Optional[str] = None, - height: Optional[str] = None, - **kwargs - ): - super(MediaGraphImageScale, self).__init__(**kwargs) - self.mode = mode - self.width = width - self.height = height - - -class MediaGraphInstance(msrest.serialization.Model): - """Represents a Media Graph instance. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. name. - :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Properties of a Media Graph instance. - :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, - } - - def __init__( - self, - *, - name: str, - system_data: Optional["MediaGraphSystemData"] = None, - properties: Optional["MediaGraphInstanceProperties"] = None, - **kwargs - ): - super(MediaGraphInstance, self).__init__(**kwargs) - self.name = name - self.system_data = system_data - self.properties = properties - - -class MediaGraphInstanceActivateRequest(ItemNonSetRequestBase): - """MediaGraphInstanceActivateRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphInstanceActivateRequest, self).__init__(name=name, **kwargs) - self.method_name = 'GraphInstanceActivate' # type: str - - -class MediaGraphInstanceCollection(msrest.serialization.Model): - """Collection of graph instances. - - :param value: Collection of graph instances. - :type value: list[~azure.media.lva.edge.models.MediaGraphInstance] - :param continuation_token: Continuation token to use in subsequent calls to enumerate through - the graph instance collection (when the collection contains too many results to return in one - response). 
- :type continuation_token: str - """ - - _attribute_map = { - 'value': {'key': 'value', 'type': '[MediaGraphInstance]'}, - 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, - } - - def __init__( - self, - *, - value: Optional[List["MediaGraphInstance"]] = None, - continuation_token: Optional[str] = None, - **kwargs - ): - super(MediaGraphInstanceCollection, self).__init__(**kwargs) - self.value = value - self.continuation_token = continuation_token - - -class MediaGraphInstanceDeActivateRequest(ItemNonSetRequestBase): - """MediaGraphInstanceDeActivateRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphInstanceDeActivateRequest, self).__init__(name=name, **kwargs) - self.method_name = 'GraphInstanceDeactivate' # type: str - - -class MediaGraphInstanceDeleteRequest(ItemNonSetRequestBase): - """MediaGraphInstanceDeleteRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphInstanceDeleteRequest, self).__init__(name=name, **kwargs) - self.method_name = 'GraphInstanceDelete' # type: str - - -class MediaGraphInstanceGetRequest(ItemNonSetRequestBase): - """MediaGraphInstanceGetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. 
- :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphInstanceGetRequest, self).__init__(name=name, **kwargs) - self.method_name = 'GraphInstanceGet' # type: str - - -class MediaGraphInstanceListRequest(OperationBase): - """MediaGraphInstanceListRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphInstanceListRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceList' # type: str - - -class MediaGraphInstanceProperties(msrest.serialization.Model): - """Properties of a Media Graph instance. - - :param description: An optional description for the instance. - :type description: str - :param topology_name: The name of the graph topology that this instance will run. A topology - with this name should already have been set in the Edge module. - :type topology_name: str - :param parameters: List of one or more graph instance parameters. - :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDefinition] - :param state: Allowed states for a graph Instance. Possible values include: "Inactive", - "Activating", "Active", "Deactivating". - :type state: str or ~azure.media.lva.edge.models.MediaGraphInstanceState - """ - - _attribute_map = { - 'description': {'key': 'description', 'type': 'str'}, - 'topology_name': {'key': 'topologyName', 'type': 'str'}, - 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDefinition]'}, - 'state': {'key': 'state', 'type': 'str'}, - } - - def __init__( - self, - *, - description: Optional[str] = None, - topology_name: Optional[str] = None, - parameters: Optional[List["MediaGraphParameterDefinition"]] = None, - state: Optional[Union[str, "MediaGraphInstanceState"]] = None, - **kwargs - ): - super(MediaGraphInstanceProperties, self).__init__(**kwargs) - self.description = description - self.topology_name = topology_name - self.parameters = parameters - self.state = state - - -class MediaGraphInstanceSetRequest(OperationBase): - """MediaGraphInstanceSetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param instance: Required. Represents a Media Graph instance. 
- :type instance: ~azure.media.lva.edge.models.MediaGraphInstance - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'instance': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'instance': {'key': 'instance', 'type': 'MediaGraphInstance'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - instance: "MediaGraphInstance", - **kwargs - ): - super(MediaGraphInstanceSetRequest, self).__init__(**kwargs) - self.method_name = 'GraphInstanceSet' # type: str - self.instance = instance - - -class MediaGraphInstanceSetRequestBody(MediaGraphInstance, OperationBase): - """MediaGraphInstanceSetRequestBody. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. name. - :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Properties of a Media Graph instance. - :type properties: ~azure.media.lva.edge.models.MediaGraphInstanceProperties - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphInstanceProperties'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - system_data: Optional["MediaGraphSystemData"] = None, - properties: Optional["MediaGraphInstanceProperties"] = None, - **kwargs - ): - super(MediaGraphInstanceSetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, **kwargs) - self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str - self.method_name = 'MediaGraphInstanceSetRequestBody' # type: str - self.name = name - self.system_data = system_data - self.properties = properties - - -class MediaGraphIoTHubMessageSink(MediaGraphSink): - """Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. Name to be used for the media graph sink. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this sink node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param hub_output_name: Name of the output path to which the graph will publish message. These - messages can then be delivered to desired destinations by declaring routes referencing the - output path in the IoT Edge deployment manifest. 
- :type hub_output_name: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'hub_output_name': {'key': 'hubOutputName', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - hub_output_name: Optional[str] = None, - **kwargs - ): - super(MediaGraphIoTHubMessageSink, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSink' # type: str - self.hub_output_name = hub_output_name - - -class MediaGraphSource(msrest.serialization.Model): - """Media graph source. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: MediaGraphIoTHubMessageSource, MediaGraphRtspSource. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The type of the source node. The discriminator for derived - types.Constant filled by server. - :type type: str - :param name: Required. The name to be used for this source node. - :type name: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - _subtype_map = { - 'type': {'#Microsoft.Media.MediaGraphIoTHubMessageSource': 'MediaGraphIoTHubMessageSource', '#Microsoft.Media.MediaGraphRtspSource': 'MediaGraphRtspSource'} - } - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphSource, self).__init__(**kwargs) - self.type = None # type: Optional[str] - self.name = name - - -class MediaGraphIoTHubMessageSource(MediaGraphSource): - """Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The type of the source node. The discriminator for derived - types.Constant filled by server. - :type type: str - :param name: Required. The name to be used for this source node. - :type name: str - :param hub_input_name: Name of the input path where messages can be routed to (via routes - declared in the IoT Edge deployment manifest). - :type hub_input_name: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'hub_input_name': {'key': 'hubInputName', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - hub_input_name: Optional[str] = None, - **kwargs - ): - super(MediaGraphIoTHubMessageSource, self).__init__(name=name, **kwargs) - self.type = '#Microsoft.Media.MediaGraphIoTHubMessageSource' # type: str - self.hub_input_name = hub_input_name - - -class MediaGraphMotionDetectionProcessor(MediaGraphProcessor): - """A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. 
- :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param sensitivity: Enumeration that specifies the sensitivity of the motion detection - processor. Possible values include: "Low", "Medium", "High". - :type sensitivity: str or ~azure.media.lva.edge.models.MediaGraphMotionDetectionSensitivity - :param output_motion_region: Indicates whether the processor should detect and output the - regions, within the video frame, where motion was detected. Default is true. - :type output_motion_region: bool - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'sensitivity': {'key': 'sensitivity', 'type': 'str'}, - 'output_motion_region': {'key': 'outputMotionRegion', 'type': 'bool'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - sensitivity: Optional[Union[str, "MediaGraphMotionDetectionSensitivity"]] = None, - output_motion_region: Optional[bool] = None, - **kwargs - ): - super(MediaGraphMotionDetectionProcessor, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphMotionDetectionProcessor' # type: str - self.sensitivity = sensitivity - self.output_motion_region = output_motion_region - - -class MediaGraphNodeInput(msrest.serialization.Model): - """Represents the input to any node in a media graph. - - :param node_name: The name of another node in the media graph, the output of which is used as - input to this node. - :type node_name: str - :param output_selectors: Allows for the selection of particular streams from another node. - :type output_selectors: list[~azure.media.lva.edge.models.MediaGraphOutputSelector] - """ - - _attribute_map = { - 'node_name': {'key': 'nodeName', 'type': 'str'}, - 'output_selectors': {'key': 'outputSelectors', 'type': '[MediaGraphOutputSelector]'}, - } - - def __init__( - self, - *, - node_name: Optional[str] = None, - output_selectors: Optional[List["MediaGraphOutputSelector"]] = None, - **kwargs - ): - super(MediaGraphNodeInput, self).__init__(**kwargs) - self.node_name = node_name - self.output_selectors = output_selectors - - -class MediaGraphOutputSelector(msrest.serialization.Model): - """Allows for the selection of particular streams from another node. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar property: The stream property to compare with. Default value: "mediaType". - :vartype property: str - :param operator: The operator to compare streams by. Possible values include: "is", "isNot". - :type operator: str or ~azure.media.lva.edge.models.MediaGraphOutputSelectorOperator - :param value: Value to compare against. 
- :type value: str - """ - - _validation = { - 'property': {'constant': True}, - } - - _attribute_map = { - 'property': {'key': 'property', 'type': 'str'}, - 'operator': {'key': 'operator', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } - - property = "mediaType" - - def __init__( - self, - *, - operator: Optional[Union[str, "MediaGraphOutputSelectorOperator"]] = None, - value: Optional[str] = None, - **kwargs - ): - super(MediaGraphOutputSelector, self).__init__(**kwargs) - self.operator = operator - self.value = value - - -class MediaGraphParameterDeclaration(msrest.serialization.Model): - """The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the parameter. - :type name: str - :param type: Required. name. Possible values include: "String", "SecretString", "Int", - "Double", "Bool". - :type type: str or ~azure.media.lva.edge.models.MediaGraphParameterType - :param description: Description of the parameter. - :type description: str - :param default: The default value for the parameter, to be used if the graph instance does not - specify a value. - :type default: str - """ - - _validation = { - 'name': {'required': True, 'max_length': 64, 'min_length': 0}, - 'type': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'default': {'key': 'default', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - type: Union[str, "MediaGraphParameterType"], - description: Optional[str] = None, - default: Optional[str] = None, - **kwargs - ): - super(MediaGraphParameterDeclaration, self).__init__(**kwargs) - self.name = name - self.type = type - self.description = description - self.default = default - - -class MediaGraphParameterDefinition(msrest.serialization.Model): - """A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. Name of parameter as defined in the graph topology. - :type name: str - :param value: Required. Value of parameter. - :type value: str - """ - - _validation = { - 'name': {'required': True}, - 'value': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'value': {'key': 'value', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - value: str, - **kwargs - ): - super(MediaGraphParameterDefinition, self).__init__(**kwargs) - self.name = name - self.value = value - - -class MediaGraphPemCertificateList(MediaGraphCertificateSource): - """A list of PEM formatted certificates. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param certificates: Required. PEM formatted public certificates one per entry. 
- :type certificates: list[str] - """ - - _validation = { - 'type': {'required': True}, - 'certificates': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'certificates': {'key': 'certificates', 'type': '[str]'}, - } - - def __init__( - self, - *, - certificates: List[str], - **kwargs - ): - super(MediaGraphPemCertificateList, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphPemCertificateList' # type: str - self.certificates = certificates - - -class MediaGraphRtspSource(MediaGraphSource): - """Enables a graph to capture media from a RTSP server. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The type of the source node. The discriminator for derived - types.Constant filled by server. - :type type: str - :param name: Required. The name to be used for this source node. - :type name: str - :param transport: Underlying RTSP transport. This is used to enable or disable HTTP tunneling. - Possible values include: "Http", "Tcp". - :type transport: str or ~azure.media.lva.edge.models.MediaGraphRtspTransport - :param endpoint: Required. RTSP endpoint of the stream that is being connected to. - :type endpoint: ~azure.media.lva.edge.models.MediaGraphEndpoint - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'endpoint': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'transport': {'key': 'transport', 'type': 'str'}, - 'endpoint': {'key': 'endpoint', 'type': 'MediaGraphEndpoint'}, - } - - def __init__( - self, - *, - name: str, - endpoint: "MediaGraphEndpoint", - transport: Optional[Union[str, "MediaGraphRtspTransport"]] = None, - **kwargs - ): - super(MediaGraphRtspSource, self).__init__(name=name, **kwargs) - self.type = '#Microsoft.Media.MediaGraphRtspSource' # type: str - self.transport = transport - self.endpoint = endpoint - - -class MediaGraphSignalGateProcessor(MediaGraphProcessor): - """A signal gate determines when to block (gate) incoming media, and when to allow it through. It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param name: Required. The name for this processor node. - :type name: str - :param inputs: Required. An array of the names of the other nodes in the media graph, the - outputs of which are used as input for this processor node. - :type inputs: list[~azure.media.lva.edge.models.MediaGraphNodeInput] - :param activation_evaluation_window: The period of time over which the gate gathers input - events, before evaluating them. - :type activation_evaluation_window: str - :param activation_signal_offset: Signal offset once the gate is activated (can be negative). It - is an offset between the time the event is received, and the timestamp of the first media - sample (eg. video frame) that is allowed through by the gate. - :type activation_signal_offset: str - :param minimum_activation_time: The minimum period for which the gate remains open, in the - absence of subsequent triggers (events). - :type minimum_activation_time: str - :param maximum_activation_time: The maximum period for which the gate remains open, in the - presence of subsequent events. 
- :type maximum_activation_time: str - """ - - _validation = { - 'type': {'required': True}, - 'name': {'required': True}, - 'inputs': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'inputs': {'key': 'inputs', 'type': '[MediaGraphNodeInput]'}, - 'activation_evaluation_window': {'key': 'activationEvaluationWindow', 'type': 'str'}, - 'activation_signal_offset': {'key': 'activationSignalOffset', 'type': 'str'}, - 'minimum_activation_time': {'key': 'minimumActivationTime', 'type': 'str'}, - 'maximum_activation_time': {'key': 'maximumActivationTime', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - inputs: List["MediaGraphNodeInput"], - activation_evaluation_window: Optional[str] = None, - activation_signal_offset: Optional[str] = None, - minimum_activation_time: Optional[str] = None, - maximum_activation_time: Optional[str] = None, - **kwargs - ): - super(MediaGraphSignalGateProcessor, self).__init__(name=name, inputs=inputs, **kwargs) - self.type = '#Microsoft.Media.MediaGraphSignalGateProcessor' # type: str - self.activation_evaluation_window = activation_evaluation_window - self.activation_signal_offset = activation_signal_offset - self.minimum_activation_time = minimum_activation_time - self.maximum_activation_time = maximum_activation_time - - -class MediaGraphSystemData(msrest.serialization.Model): - """Graph system data. - - :param created_at: The timestamp of resource creation (UTC). - :type created_at: ~datetime.datetime - :param last_modified_at: The timestamp of resource last modification (UTC). - :type last_modified_at: ~datetime.datetime - """ - - _attribute_map = { - 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, - 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, - } - - def __init__( - self, - *, - created_at: Optional[datetime.datetime] = None, - last_modified_at: Optional[datetime.datetime] = None, - **kwargs - ): - super(MediaGraphSystemData, self).__init__(**kwargs) - self.created_at = created_at - self.last_modified_at = last_modified_at - - -class MediaGraphTlsEndpoint(MediaGraphEndpoint): - """An endpoint that the graph can connect to, which must be connected over TLS/SSL. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials - :param url: Required. Url for the endpoint. - :type url: str - :param trusted_certificates: Trusted certificates when authenticating a TLS connection. Null - designates that Azure Media Service's source of trust should be used. - :type trusted_certificates: ~azure.media.lva.edge.models.MediaGraphCertificateSource - :param validation_options: Validation options to use when authenticating a TLS connection. By - default, strict validation is used. 
- :type validation_options: ~azure.media.lva.edge.models.MediaGraphTlsValidationOptions - """ - - _validation = { - 'type': {'required': True}, - 'url': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, - 'url': {'key': 'url', 'type': 'str'}, - 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'MediaGraphCertificateSource'}, - 'validation_options': {'key': 'validationOptions', 'type': 'MediaGraphTlsValidationOptions'}, - } - - def __init__( - self, - *, - url: str, - credentials: Optional["MediaGraphCredentials"] = None, - trusted_certificates: Optional["MediaGraphCertificateSource"] = None, - validation_options: Optional["MediaGraphTlsValidationOptions"] = None, - **kwargs - ): - super(MediaGraphTlsEndpoint, self).__init__(credentials=credentials, url=url, **kwargs) - self.type = '#Microsoft.Media.MediaGraphTlsEndpoint' # type: str - self.trusted_certificates = trusted_certificates - self.validation_options = validation_options - - -class MediaGraphTlsValidationOptions(msrest.serialization.Model): - """Options for controlling the authentication of TLS endpoints. - - :param ignore_hostname: Boolean value ignoring the host name (common name) during validation. - :type ignore_hostname: str - :param ignore_signature: Boolean value ignoring the integrity of the certificate chain at the - current time. - :type ignore_signature: str - """ - - _attribute_map = { - 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, - 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, - } - - def __init__( - self, - *, - ignore_hostname: Optional[str] = None, - ignore_signature: Optional[str] = None, - **kwargs - ): - super(MediaGraphTlsValidationOptions, self).__init__(**kwargs) - self.ignore_hostname = ignore_hostname - self.ignore_signature = ignore_signature - - -class MediaGraphTopology(msrest.serialization.Model): - """Describes a graph topology. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. name. - :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Describes the properties of a graph topology. - :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, - } - - def __init__( - self, - *, - name: str, - system_data: Optional["MediaGraphSystemData"] = None, - properties: Optional["MediaGraphTopologyProperties"] = None, - **kwargs - ): - super(MediaGraphTopology, self).__init__(**kwargs) - self.name = name - self.system_data = system_data - self.properties = properties - - -class MediaGraphTopologyCollection(msrest.serialization.Model): - """Collection of graph topologies. - - :param value: Collection of graph topologies. - :type value: list[~azure.media.lva.edge.models.MediaGraphTopology] - :param continuation_token: Continuation token to use in subsequent calls to enumerate through - the graph topologies collection (when the collection contains too many results to return in one - response). 
- :type continuation_token: str - """ - - _attribute_map = { - 'value': {'key': 'value', 'type': '[MediaGraphTopology]'}, - 'continuation_token': {'key': '@continuationToken', 'type': 'str'}, - } - - def __init__( - self, - *, - value: Optional[List["MediaGraphTopology"]] = None, - continuation_token: Optional[str] = None, - **kwargs - ): - super(MediaGraphTopologyCollection, self).__init__(**kwargs) - self.value = value - self.continuation_token = continuation_token - - -class MediaGraphTopologyDeleteRequest(ItemNonSetRequestBase): - """MediaGraphTopologyDeleteRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphTopologyDeleteRequest, self).__init__(name=name, **kwargs) - self.method_name = 'GraphTopologyDelete' # type: str - - -class MediaGraphTopologyGetRequest(ItemNonSetRequestBase): - """MediaGraphTopologyGetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. method name. - :type name: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - **kwargs - ): - super(MediaGraphTopologyGetRequest, self).__init__(name=name, **kwargs) - self.method_name = 'GraphTopologyGet' # type: str - - -class MediaGraphTopologyListRequest(OperationBase): - """MediaGraphTopologyListRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - } - - api_version = "1.0" - - def __init__( - self, - **kwargs - ): - super(MediaGraphTopologyListRequest, self).__init__(**kwargs) - self.method_name = 'GraphTopologyList' # type: str - - -class MediaGraphTopologyProperties(msrest.serialization.Model): - """Describes the properties of a graph topology. - - :param description: An optional description for the instance. 
- :type description: str - :param parameters: An optional description for the instance. - :type parameters: list[~azure.media.lva.edge.models.MediaGraphParameterDeclaration] - :param sources: An optional description for the instance. - :type sources: list[~azure.media.lva.edge.models.MediaGraphSource] - :param processors: An optional description for the instance. - :type processors: list[~azure.media.lva.edge.models.MediaGraphProcessor] - :param sinks: name. - :type sinks: list[~azure.media.lva.edge.models.MediaGraphSink] - """ - - _attribute_map = { - 'description': {'key': 'description', 'type': 'str'}, - 'parameters': {'key': 'parameters', 'type': '[MediaGraphParameterDeclaration]'}, - 'sources': {'key': 'sources', 'type': '[MediaGraphSource]'}, - 'processors': {'key': 'processors', 'type': '[MediaGraphProcessor]'}, - 'sinks': {'key': 'sinks', 'type': '[MediaGraphSink]'}, - } - - def __init__( - self, - *, - description: Optional[str] = None, - parameters: Optional[List["MediaGraphParameterDeclaration"]] = None, - sources: Optional[List["MediaGraphSource"]] = None, - processors: Optional[List["MediaGraphProcessor"]] = None, - sinks: Optional[List["MediaGraphSink"]] = None, - **kwargs - ): - super(MediaGraphTopologyProperties, self).__init__(**kwargs) - self.description = description - self.parameters = parameters - self.sources = sources - self.processors = processors - self.sinks = sinks - - -class MediaGraphTopologySetRequest(OperationBase): - """MediaGraphTopologySetRequest. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param graph: Required. Describes a graph topology. - :type graph: ~azure.media.lva.edge.models.MediaGraphTopology - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'graph': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'graph': {'key': 'graph', 'type': 'MediaGraphTopology'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - graph: "MediaGraphTopology", - **kwargs - ): - super(MediaGraphTopologySetRequest, self).__init__(**kwargs) - self.method_name = 'GraphTopologySet' # type: str - self.graph = graph - - -class MediaGraphTopologySetRequestBody(MediaGraphTopology, OperationBase): - """MediaGraphTopologySetRequestBody. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar method_name: method name.Constant filled by server. - :vartype method_name: str - :ivar api_version: api version. Default value: "1.0". - :vartype api_version: str - :param name: Required. name. - :type name: str - :param system_data: Graph system data. - :type system_data: ~azure.media.lva.edge.models.MediaGraphSystemData - :param properties: Describes the properties of a graph topology. 
- :type properties: ~azure.media.lva.edge.models.MediaGraphTopologyProperties - """ - - _validation = { - 'method_name': {'readonly': True}, - 'api_version': {'constant': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'method_name': {'key': 'methodName', 'type': 'str'}, - 'api_version': {'key': '@apiVersion', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'system_data': {'key': 'systemData', 'type': 'MediaGraphSystemData'}, - 'properties': {'key': 'properties', 'type': 'MediaGraphTopologyProperties'}, - } - - api_version = "1.0" - - def __init__( - self, - *, - name: str, - system_data: Optional["MediaGraphSystemData"] = None, - properties: Optional["MediaGraphTopologyProperties"] = None, - **kwargs - ): - super(MediaGraphTopologySetRequestBody, self).__init__(name=name, system_data=system_data, properties=properties, **kwargs) - self.method_name = 'MediaGraphTopologySetRequestBody' # type: str - self.method_name = 'MediaGraphTopologySetRequestBody' # type: str - self.name = name - self.system_data = system_data - self.properties = properties - - -class MediaGraphUnsecuredEndpoint(MediaGraphEndpoint): - """An endpoint that the media graph can connect to, with no encryption in transit. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param credentials: Polymorphic credentials to be presented to the endpoint. - :type credentials: ~azure.media.lva.edge.models.MediaGraphCredentials - :param url: Required. Url for the endpoint. - :type url: str - """ - - _validation = { - 'type': {'required': True}, - 'url': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'MediaGraphCredentials'}, - 'url': {'key': 'url', 'type': 'str'}, - } - - def __init__( - self, - *, - url: str, - credentials: Optional["MediaGraphCredentials"] = None, - **kwargs - ): - super(MediaGraphUnsecuredEndpoint, self).__init__(credentials=credentials, url=url, **kwargs) - self.type = '#Microsoft.Media.MediaGraphUnsecuredEndpoint' # type: str - - -class MediaGraphUsernamePasswordCredentials(MediaGraphCredentials): - """Username/password credential pair. - - All required parameters must be populated in order to send to Azure. - - :param type: Required. The discriminator for derived types.Constant filled by server. - :type type: str - :param username: Required. Username for a username/password pair. - :type username: str - :param password: Password for a username/password pair. 
- :type password: str - """ - - _validation = { - 'type': {'required': True}, - 'username': {'required': True}, - } - - _attribute_map = { - 'type': {'key': '@type', 'type': 'str'}, - 'username': {'key': 'username', 'type': 'str'}, - 'password': {'key': 'password', 'type': 'str'}, - } - - def __init__( - self, - *, - username: str, - password: Optional[str] = None, - **kwargs - ): - super(MediaGraphUsernamePasswordCredentials, self).__init__(**kwargs) - self.type = '#Microsoft.Media.MediaGraphUsernamePasswordCredentials' # type: str - self.username = username - self.password = password diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/py.typed b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/py.typed deleted file mode 100644 index e5aff4f83af8..000000000000 --- a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/py.typed +++ /dev/null @@ -1 +0,0 @@ -# Marker file for PEP 561. \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_version.py b/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_version.py deleted file mode 100644 index f95f18986f48..000000000000 --- a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_version.py +++ /dev/null @@ -1,7 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# -------------------------------------------------------------------------- - -VERSION = '0.0.1' diff --git a/sdk/media/azure-media-lva-edge/dev_requirements.txt b/sdk/media/azure-media-lva-edge/dev_requirements.txt deleted file mode 100644 index 08bcfb306787..000000000000 --- a/sdk/media/azure-media-lva-edge/dev_requirements.txt +++ /dev/null @@ -1,11 +0,0 @@ -../../core/azure-core --e ../../../tools/azure-devtools --e ../../../tools/azure-sdk-tools --e ../../identity/azure-identity -aiohttp>=3.0; python_version >= '3.5' -aiodns>=2.0; python_version >= '3.5' -msrest>=0.6.10 -pytest==5.4.2 -tox>=3.20.0 -tox-monorepo>=0.1.2 -pytest-asyncio==0.12.0 diff --git a/sdk/media/azure-media-lva-edge/samples/sample_conditional_async.py b/sdk/media/azure-media-lva-edge/samples/sample_conditional_async.py deleted file mode 100644 index c894b9b71a09..000000000000 --- a/sdk/media/azure-media-lva-edge/samples/sample_conditional_async.py +++ /dev/null @@ -1,48 +0,0 @@ -# coding: utf-8 - -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
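Note: the generated MediaGraph* models above are meant to be composed into a graph topology and sent to the LVA Edge module as IoT Hub direct-method payloads. The sketch below is a minimal illustration based on the sample_lva.py sample in this package; the connection string, device id, and RTSP values are placeholders, and the module name "lvaEdge" is taken from that sample.

```python
from azure.media.lva.edge._generated.models import (
    MediaGraphTopology, MediaGraphTopologyProperties, MediaGraphParameterDeclaration,
    MediaGraphRtspSource, MediaGraphUnsecuredEndpoint,
    MediaGraphInstance, MediaGraphInstanceProperties, MediaGraphParameterDefinition,
    MediaGraphTopologySetRequest,
)
from azure.iot.hub import IoTHubRegistryManager
from azure.iot.hub.models import CloudToDeviceMethod

# A topology declares parameters (optionally with defaults) and references them as "${name}".
properties = MediaGraphTopologyProperties(
    parameters=[MediaGraphParameterDeclaration(name="rtspUrl", type="String", default="rtsp://www.sample.com")],
    sources=[MediaGraphRtspSource(
        name="rtspSource",
        endpoint=MediaGraphUnsecuredEndpoint(url="${rtspUrl}"),
    )],
)
topology = MediaGraphTopology(name="graphTopology1", properties=properties)

# A graph instance binds concrete values to the parameters declared by the topology.
instance = MediaGraphInstance(
    name="graphInstance1",
    properties=MediaGraphInstanceProperties(
        topology_name="graphTopology1",
        parameters=[MediaGraphParameterDefinition(name="rtspUrl", value="rtsp://rtspsim:554/media/camera-300s.mkv")],
    ),
)

# Each request model carries its own method name and serializes into a direct-method payload.
request = MediaGraphTopologySetRequest(graph=topology)
method = CloudToDeviceMethod(method_name=request.method_name, payload=request.serialize())
registry_manager = IoTHubRegistryManager("<iothub-connection-string>")  # placeholder
registry_manager.invoke_device_module_method("<device-id>", "lvaEdge", method)
```

The same pattern applies to the other request models (GraphInstanceSet, GraphInstanceActivate, and so on): build the request, serialize it, and invoke it as a direct method on the LVA Edge module.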
-# -------------------------------------------------------------------------- - -import asyncio -import os -from colorama import init, Style, Fore -init() - -from azure.identity.aio import DefaultAzureCredential -from azure.learnappconfig.aio import AppConfigurationClient -from azure.core.exceptions import ResourceNotFoundError, ResourceNotModifiedError -from azure.core import MatchConditions - - -async def main(): - url = os.environ.get('API-LEARN_ENDPOINT') - credential = DefaultAzureCredential() - async with AppConfigurationClient(account_url=url, credential=credential) as client: - - # Retrieve initial color value - try: - first_color = await client.get_configuration_setting(os.environ['API-LEARN_SETTING_COLOR_KEY']) - except ResourceNotFoundError: - raise - - # Get latest color value, only if it has changed - try: - new_color = await client.get_configuration_setting( - key=os.environ['API-LEARN_SETTING_COLOR_KEY'], - match_condition=MatchConditions.IfModified, - etag=first_color.etag - ) - except ResourceNotModifiedError: - new_color = first_color - - color = getattr(Fore, new_color.value.upper()) - greeting = 'Hello!' - print(f'{color}{greeting}{Style.RESET_ALL}') - - -if __name__ == "__main__": - loop = asyncio.get_event_loop() - loop.run_until_complete(main()) diff --git a/sdk/media/azure-media-lva-edge/samples/sample_hello_world.py b/sdk/media/azure-media-lva-edge/samples/sample_hello_world.py deleted file mode 100644 index f6fa6e0686fd..000000000000 --- a/sdk/media/azure-media-lva-edge/samples/sample_hello_world.py +++ /dev/null @@ -1,35 +0,0 @@ -# coding: utf-8 - -# ------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. 
-# -------------------------------------------------------------------------- - -import os -from colorama import init, Style, Fore -init() - -from azure.identity import DefaultAzureCredential -from azure.learnappconfig import AppConfigurationClient - -def main(): - url = os.environ.get('API-LEARN_ENDPOINT') - credential = DefaultAzureCredential() - client = AppConfigurationClient(account_url=url, credential=credential) - - try: - color_setting = client.get_configuration_setting(os.environ['API-LEARN_SETTING_COLOR_KEY']) - color = color_setting.value.upper() - text_setting = client.get_configuration_setting(os.environ['API-LEARN_SETTING_TEXT_KEY']) - greeting = text_setting.value - except: - color = 'RED' - greeting = 'Default greeting' - - color = getattr(Fore, color) - print(f'{color}{greeting}{Style.RESET_ALL}') - - -if __name__ == "__main__": - main() diff --git a/sdk/media/azure-media-lva-edge/samples/sample_lva.py b/sdk/media/azure-media-lva-edge/samples/sample_lva.py deleted file mode 100644 index 9ac9ca9a817a..000000000000 --- a/sdk/media/azure-media-lva-edge/samples/sample_lva.py +++ /dev/null @@ -1,83 +0,0 @@ - -import json -import os -from azure.media.lva.edge._generated.models import * -from azure.iot.hub import IoTHubRegistryManager -from azure.iot.hub.models import CloudToDeviceMethod, CloudToDeviceMethodResult -from datetime import time - -device_id = "lva-sample-device" -module_d = "lvaEdge" -connection_string = "HostName=lvasamplehub77xvrvtar2bpw.azure-devices.net;SharedAccessKeyName=iothubowner;SharedAccessKey=o77hgzsswnBZsaGKVSDjSmm53m4ViJb/s1xv9zfDCi0=" -graph_instance_name = "graphInstance1" -graph_topology_name = "graphTopology1" - - -def build_graph_topology(): - graph_properties = MediaGraphTopologyProperties() - graph_properties.description = "Continuous video recording to an Azure Media Services Asset" - user_name_param = MediaGraphParameterDeclaration(name="rtspUserName",type="String",default="dummyusername") - password_param = MediaGraphParameterDeclaration(name="rtspPassword",type="String",default="dummypassword") - url_param = MediaGraphParameterDeclaration(name="rtspUrl",type="String",default="rtsp://www.sample.com") - - source = MediaGraphRtspSource(name="rtspSource", endpoint=MediaGraphUnsecuredEndpoint(url="${rtspUrl}",credentials=MediaGraphUsernamePasswordCredentials(username="${rtspUserName}",password="${rtspPassword}"))) - node = MediaGraphNodeInput(node_name="rtspSource") - sink = MediaGraphAssetSink(name="assetsink", inputs=[node],asset_name_pattern='sampleAsset-${System.GraphTopologyName}-${System.GraphInstanceName}', segment_length="PT0H0M30S",local_media_cache_maximum_size_mi_b=2048,local_media_cache_path="/var/lib/azuremediaservices/tmp/") - graph_properties.parameters = [user_name_param, password_param, url_param] - graph_properties.sources = [source] - graph_properties.sinks = [sink] - graph = MediaGraphTopology(name=graph_topology_name,properties=graph_properties) - - return graph - -def build_graph_instance(): - url_param = MediaGraphParameterDefinition(name="rtspUrl", value="rtsp://rtspsim:554/media/camera-300s.mkv") - graph_instance_properties = MediaGraphInstanceProperties(description="Sample graph description", topology_name=graph_topology_name, parameters=[url_param]) - - graph_instance = MediaGraphInstance(name=graph_instance_name, properties=graph_instance_properties) - - return graph_instance - -def invoke_method(method): - direct_method = CloudToDeviceMethod(method_name=method.method_name, payload=method.serialize()) - registry_manager 
= IoTHubRegistryManager(connection_string) - - return registry_manager.invoke_device_module_method(device_id, module_d, direct_method) - -def main(): - graph_topology = build_graph_topology() - graph_instance = build_graph_instance() - - try: - set_graph = invoke_method(MediaGraphTopologySetRequest(graph=graph_topology)) - set_graph_result = MediaGraphTopology.deserialize(set_graph) - - list_graph = invoke_method(MediaGraphTopologyListRequest()) - list_graph_result = MediaGraphTopology.deserialize(list_graph) - - get_graph = invoke_method(MediaGraphTopologyGetRequest(name=graph_topology_name)) - get_graph_result = MediaGraphTopology.deserialize(get_graph) - - set_graph_instance = invoke_method(MediaGraphInstanceSetRequest(instance=graph_instance)) - set_graph_instance_result = MediaGraphInstance.deserialize(set_graph_instance) - - activate_graph_instance = invoke_method(MediaGraphInstanceActivateRequest(name=graph_instance_name)) - activate_graph_instance_result = MediaGraphInstance.deserialize(activate_graph_instance) - - get_graph_instance = invoke_method(MediaGraphInstanceGetRequest(name=graph_instance_name)) - get_graph_instance_result = MediaGraphInstance.deserialize(get_graph_instance) - - deactivate_graph_instance = invoke_method(MediaGraphInstanceDeActivateRequest(name=graph_instance_name)) - deactivate_graph_instance_result = MediaGraphInstance.deserialize(deactivate_graph_instance) - - delete_graph_instance = invoke_method(MediaGraphInstanceDeleteRequest(name=graph_instance_name)) - delete_graph_instance_result = MediaGraphInstance.deserialize(delete_graph_instance) - - delete_graph = invoke_method(MediaGraphTopologyDeleteRequest(name=graph_topology_name)) - delete_graph_result = MediaGraphTopology.deserialize(delete_graph) - - except Exception as ex: - print(ex) - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/sdk_packaging.toml b/sdk/media/azure-media-lva-edge/sdk_packaging.toml deleted file mode 100644 index b366f78fb41b..000000000000 --- a/sdk/media/azure-media-lva-edge/sdk_packaging.toml +++ /dev/null @@ -1,4 +0,0 @@ -[packaging] -is_arm = false -need_msrestazure = false -auto_update = false diff --git a/sdk/media/azure-media-lva-edge/setup.py b/sdk/media/azure-media-lva-edge/setup.py deleted file mode 100644 index d4a8c12edcc6..000000000000 --- a/sdk/media/azure-media-lva-edge/setup.py +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env python - -#------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -#-------------------------------------------------------------------------- - -import sys -import re -import os.path -from io import open -from setuptools import find_packages, setup - -# Change the PACKAGE_NAME only to change folder and different name -PACKAGE_NAME = "azure-media-lva-edge" -PACKAGE_PPRINT_NAME = "Azure Media Live Video Analytics Edge SDK" - -# a-b-c => a/b/c -package_folder_path = PACKAGE_NAME.replace('-', '/') -# a-b-c => a.b.c -namespace_name = PACKAGE_NAME.replace('-', '.') - -# azure v0.x is not compatible with this package -# azure v0.x used to have a __version__ attribute (newer versions don't) -try: - import azure - try: - ver = azure.__version__ - raise Exception( - 'This package is incompatible with azure=={}. '.format(ver) + - 'Uninstall it with "pip uninstall azure".' 
- ) - except AttributeError: - pass -except ImportError: - pass - -# Version extraction inspired from 'requests' -with open(os.path.join(package_folder_path, '_version.py'), 'r') as fd: - version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', - fd.read(), re.MULTILINE).group(1) - -if not version: - raise RuntimeError('Cannot find version information') - -with open('README.md', encoding='utf-8') as f: - readme = f.read() -with open('CHANGELOG.md', encoding='utf-8') as f: - changelog = f.read() - -exclude_packages = [ - 'tests', - 'tests.*', - 'samples', - # Exclude packages that will be covered by PEP420 or nspkg - 'azure', - ] -if sys.version_info < (3, 5, 3): - exclude_packages.extend([ - '*.aio', - '*.aio.*' - ]) - -setup( - name=PACKAGE_NAME, - version=version, - description='Microsoft {} Library for Python'.format(PACKAGE_PPRINT_NAME), - long_description=readme + '\n\n' + changelog, - long_description_content_type='text/markdown', - license='MIT License', - author='Microsoft Corporation', - author_email='azpysdkhelp@microsoft.com', - url='https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/appconfiguration/azure-appconfiguration', - classifiers=[ - "Development Status :: 5 - Production/Stable", - 'Programming Language :: Python', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'License :: OSI Approved :: MIT License', - ], - zip_safe=False, - packages=find_packages(exclude=exclude_packages), - install_requires=[ - "msrest>=0.6.10", - "azure-core<2.0.0,>=1.0.0", - ], - extras_require={ - ":python_version<'3.0'": ['azure-nspkg'], - ":python_version<'3.4'": ['enum34>=1.0.4'], - ":python_version<'3.5'": ['typing'], - "async:python_version>='3.5'": [ - 'aiohttp>=3.0', - 'aiodns>=2.0' - ], - } -) \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/swagger/README.md b/sdk/media/azure-media-lva-edge/swagger/README.md deleted file mode 100644 index 7880fc364c91..000000000000 --- a/sdk/media/azure-media-lva-edge/swagger/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# Azure Queue Storage for Python - -> see https://aka.ms/autorest - - -### Generation -```ps -cd -autorest --v3 --python README.md -``` - -### Settings -```yaml -require: C:\azure-rest-api-specs-pr\specification\mediaservices\data-plane\readme.md -output-folder: ../azure/media/lva/edge/_generated -namespace: azure.media.lva.edge -no-namespace-folders: true -license-header: MICROSOFT_MIT_NO_VERSION -enable-xml: false -vanilla: true -clear-output-folder: true -add-credentials: false -python: true -package-version: "1.0" -public-clients: false -``` diff --git a/sdk/media/azure-media-lva-edge/swagger/appconfiguration.json b/sdk/media/azure-media-lva-edge/swagger/appconfiguration.json deleted file mode 100644 index 36b206ca6142..000000000000 --- a/sdk/media/azure-media-lva-edge/swagger/appconfiguration.json +++ /dev/null @@ -1,1239 +0,0 @@ -{ - "swagger": "2.0", - "info": { - "description": "Direct Methods for Live Video Analytics on IoT Edge.", - "version": "1.0.4", - "title": "Direct Methods for Live Video Analytics on IoT Edge", - "contact": { - "email": "amshelp@microsoft.com" - } - }, - "security": [ - { - "sharedAccessSignature": [] - } - ], - "paths": {}, - "securityDefinitions": { - "sharedAccessSignature": { - "type": "apiKey", - "name": "Authorization", 
- "in": "header" - } - }, - "definitions": { - "OperationBase": { - "type": "object", - "properties": { - "methodName": { - "type": "string", - "description": "method name", - "readOnly": true - }, - "@apiVersion": { - "type": "string", - "description": "api version", - "enum": [ - "1.0" - ], - "x-ms-enum": { - "name": "ApiVersionEnum", - "modelAsString": false - } - } - }, - "discriminator": "methodName" - }, - "MediaGraphTopologySetRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphTopologySet", - "allOf": [ - { - "$ref": "#/definitions/OperationBase" - } - ], - "required": [ - "graph" - ], - "properties": { - "graph": { - "$ref": "#/definitions/MediaGraphTopology" - } - } - }, - "MediaGraphTopologySetRequestBody": { - "type": "object", - "x-ms-discriminator-value": "GraphTopologySet", - "allOf": [ - { - "$ref": "#/definitions/OperationBase" - }, - { - "$ref": "#/definitions/MediaGraphTopology" - } - ] - }, - "MediaGraphInstanceSetRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphInstanceSet", - "allOf": [ - { - "$ref": "#/definitions/OperationBase" - } - ], - "required": [ - "instance" - ], - "properties": { - "instance": { - "$ref": "#/definitions/MediaGraphInstance" - } - } - }, - "ItemNonSetRequestBase": { - "type": "object", - "allOf": [ - { - "$ref": "#/definitions/OperationBase" - } - ], - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string", - "description": "method name" - } - } - }, - "MediaGraphTopologyListRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphTopologyList", - "allOf": [ - { - "$ref": "#/definitions/OperationBase" - } - ] - }, - "MediaGraphTopologyGetRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphTopologyGet", - "allOf": [ - { - "$ref": "#/definitions/ItemNonSetRequestBase" - } - ] - }, - "MediaGraphTopologyDeleteRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphTopologyDelete", - "allOf": [ - { - "$ref": "#/definitions/ItemNonSetRequestBase" - } - ] - }, - "MediaGraphInstanceListRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphInstanceList", - "allOf": [ - { - "$ref": "#/definitions/OperationBase" - } - ] - }, - "MediaGraphInstanceGetRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphInstanceGet", - "allOf": [ - { - "$ref": "#/definitions/ItemNonSetRequestBase" - } - ] - }, - "MediaGraphInstanceActivateRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphInstanceActivate", - "allOf": [ - { - "$ref": "#/definitions/ItemNonSetRequestBase" - } - ] - }, - "MediaGraphInstanceDeActivateRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphInstanceDeactivate", - "allOf": [ - { - "$ref": "#/definitions/ItemNonSetRequestBase" - } - ] - }, - "MediaGraphInstanceDeleteRequest": { - "type": "object", - "x-ms-discriminator-value": "GraphInstanceDelete", - "allOf": [ - { - "$ref": "#/definitions/ItemNonSetRequestBase" - } - ] - }, - "MediaGraphInstance": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string", - "description": "name" - }, - "systemData": { - "$ref": "#/definitions/MediaGraphSystemData" - }, - "properties": { - "$ref": "#/definitions/MediaGraphInstanceProperties" - } - }, - "description": "Represents a Media Graph instance." - }, - "MediaGraphInstanceProperties": { - "type": "object", - "properties": { - "description": { - "type": "string", - "description": "An optional description for the instance." 
- }, - "topologyName": { - "type": "string", - "description": "The name of the graph topology that this instance will run. A topology with this name should already have been set in the Edge module." - }, - "parameters": { - "type": "array", - "description": "List of one or more graph instance parameters.", - "items": { - "$ref": "#/definitions/MediaGraphParameterDefinition" - } - }, - "state": { - "type": "string", - "description": "Allowed states for a graph Instance.", - "enum": [ - "Inactive", - "Activating", - "Active", - "Deactivating" - ], - "x-ms-enum": { - "name": "MediaGraphInstanceState", - "values": [ - { - "value": "Inactive", - "description": "Inactive state." - }, - { - "value": "Activating", - "description": "Activating state." - }, - { - "value": "Active", - "description": "Active state." - }, - { - "value": "Deactivating", - "description": "Deactivating state." - } - ], - "modelAsString": false - } - } - }, - "description": "Properties of a Media Graph instance." - }, - "MediaGraphParameterDefinition": { - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "type": "string", - "description": "Name of parameter as defined in the graph topology." - }, - "value": { - "type": "string", - "description": "Value of parameter." - } - }, - "description": "A key, value pair. The graph topology can be authored with certain values with parameters. Then, during graph instance creation, the value for that parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters." - }, - "MediaGraphInstanceCollection": { - "properties": { - "value": { - "type": "array", - "description": "Collection of graph instances.", - "items": { - "$ref": "#/definitions/MediaGraphInstance" - } - }, - "@continuationToken": { - "type": "string", - "description": "Continuation token to use in subsequent calls to enumerate through the graph instance collection (when the collection contains too many results to return in one response)." - } - }, - "description": "Collection of graph instances." - }, - "MediaGraphTopologyCollection": { - "properties": { - "value": { - "type": "array", - "description": "Collection of graph topologies.", - "items": { - "$ref": "#/definitions/MediaGraphTopology" - } - }, - "@continuationToken": { - "type": "string", - "description": "Continuation token to use in subsequent calls to enumerate through the graph topologies collection (when the collection contains too many results to return in one response)." - } - }, - "description": "Collection of graph topologies." - }, - "MediaGraphTopology": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string", - "description": "name" - }, - "systemData": { - "$ref": "#/definitions/MediaGraphSystemData" - }, - "properties": { - "$ref": "#/definitions/MediaGraphTopologyProperties" - } - }, - "description": "Describes a graph topology." - }, - "MediaGraphTopologyProperties": { - "type": "object", - "properties": { - "description": { - "type": "string", - "description": "An optional description for the instance." 
- }, - "parameters": { - "type": "array", - "description": "An optional description for the instance.", - "items": { - "$ref": "#/definitions/MediaGraphParameterDeclaration" - } - }, - "sources": { - "type": "array", - "description": "An optional description for the instance.", - "items": { - "$ref": "#/definitions/MediaGraphSource" - } - }, - "processors": { - "type": "array", - "description": "An optional description for the instance.", - "items": { - "$ref": "#/definitions/MediaGraphProcessor" - } - }, - "sinks": { - "description": "name", - "type": "array", - "items": { - "$ref": "#/definitions/MediaGraphSink" - } - } - }, - "description": "Describes the properties of a graph topology." - }, - "MediaGraphSystemData": { - "type": "object", - "properties": { - "createdAt": { - "type": "string", - "format": "date-time", - "description": "The timestamp of resource creation (UTC)." - }, - "lastModifiedAt": { - "type": "string", - "format": "date-time", - "description": "The timestamp of resource last modification (UTC)." - } - }, - "description": "Graph system data." - }, - "MediaGraphParameterDeclaration": { - "type": "object", - "required": [ - "name", - "type" - ], - "properties": { - "name": { - "type": "string", - "description": "The name of the parameter.", - "maxLength": 64 - }, - "type": { - "type": "string", - "description": "name", - "enum": [ - "String", - "SecretString", - "Int", - "Double", - "Bool" - ], - "x-ms-enum": { - "name": "MediaGraphParameterType", - "values": [ - { - "value": "String", - "description": "A string parameter value." - }, - { - "value": "SecretString", - "description": "A string to hold sensitive information as parameter value." - }, - { - "value": "Int", - "description": "A 32-bit signed integer as parameter value." - }, - { - "value": "Double", - "description": "A 64-bit double-precision floating point type as parameter value." - }, - { - "value": "Bool", - "description": "A boolean value that is either true or false." - } - ], - "modelAsString": false - } - }, - "description": { - "type": "string", - "description": "Description of the parameter." - }, - "default": { - "type": "string", - "description": "The default value for the parameter, to be used if the graph instance does not specify a value." - } - }, - "description": "The declaration of a parameter in the graph topology. A graph topology can be authored with parameters. Then, during graph instance creation, the value for those parameters can be specified. This allows the same graph topology to be used as a blueprint for multiple graph instances with different values for the parameters." - }, - "MediaGraphSource": { - "type": "object", - "required": [ - "@type", - "name" - ], - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The type of the source node. The discriminator for derived types." - }, - "name": { - "type": "string", - "description": "The name to be used for this source node." - } - }, - "description": "Media graph source." - }, - "MediaGraphRtspSource": { - "properties": { - "transport": { - "type": "string", - "description": "Underlying RTSP transport. This is used to enable or disable HTTP tunneling.", - "enum": [ - "Http", - "Tcp" - ], - "x-ms-enum": { - "name": "MediaGraphRtspTransport", - "values": [ - { - "value": "Http", - "description": "HTTP/HTTPS transport. This should be used when HTTP tunneling is desired." - }, - { - "value": "Tcp", - "description": "TCP transport. This should be used when HTTP tunneling is NOT desired." 
- } - ], - "modelAsString": true - } - }, - "endpoint": { - "description": "RTSP endpoint of the stream that is being connected to.", - "$ref": "#/definitions/MediaGraphEndpoint" - } - }, - "required": [ - "endpoint" - ], - "allOf": [ - { - "$ref": "#/definitions/MediaGraphSource" - }, - {} - ], - "description": "Enables a graph to capture media from a RTSP server.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphRtspSource" - }, - "MediaGraphIoTHubMessageSource": { - "properties": { - "hubInputName": { - "type": "string", - "description": "Name of the input path where messages can be routed to (via routes declared in the IoT Edge deployment manifest)." - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphSource" - }, - {} - ], - "description": "Enables a graph to receive messages via routes declared in the IoT Edge deployment manifest.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphIoTHubMessageSource" - }, - "MediaGraphIoTHubMessageSink": { - "properties": { - "hubOutputName": { - "type": "string", - "description": "Name of the output path to which the graph will publish message. These messages can then be delivered to desired destinations by declaring routes referencing the output path in the IoT Edge deployment manifest." - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphSink" - }, - {} - ], - "description": "Enables a graph to publish messages that can be delivered via routes declared in the IoT Edge deployment manifest.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphIoTHubMessageSink" - }, - "MediaGraphEndpoint": { - "type": "object", - "required": [ - "@type", - "url" - ], - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The discriminator for derived types." - }, - "credentials": { - "description": "Polymorphic credentials to be presented to the endpoint.", - "$ref": "#/definitions/MediaGraphCredentials" - }, - "url": { - "type": "string", - "description": "Url for the endpoint." - } - }, - "description": "Base class for endpoints." - }, - "MediaGraphCredentials": { - "type": "object", - "required": [ - "@type" - ], - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The discriminator for derived types." - } - }, - "description": "Credentials to present during authentication." - }, - "MediaGraphUsernamePasswordCredentials": { - "properties": { - "username": { - "type": "string", - "description": "Username for a username/password pair." - }, - "password": { - "type": "string", - "description": "Password for a username/password pair." - } - }, - "required": [ - "username" - ], - "allOf": [ - { - "$ref": "#/definitions/MediaGraphCredentials" - }, - {} - ], - "description": "Username/password credential pair.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphUsernamePasswordCredentials" - }, - "MediaGraphHttpHeaderCredentials": { - "properties": { - "headerName": { - "type": "string", - "description": "HTTP header name." - }, - "headerValue": { - "type": "string", - "description": "HTTP header value." 
- } - }, - "required": [ - "headerName", - "headerValue" - ], - "allOf": [ - { - "$ref": "#/definitions/MediaGraphCredentials" - }, - {} - ], - "description": "Http header service credentials.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphHttpHeaderCredentials" - }, - "MediaGraphUnsecuredEndpoint": { - "allOf": [ - { - "$ref": "#/definitions/MediaGraphEndpoint" - }, - {} - ], - "description": "An endpoint that the media graph can connect to, with no encryption in transit.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphUnsecuredEndpoint" - }, - "MediaGraphTlsEndpoint": { - "properties": { - "trustedCertificates": { - "description": "Trusted certificates when authenticating a TLS connection. Null designates that Azure Media Service's source of trust should be used.", - "$ref": "#/definitions/MediaGraphCertificateSource" - }, - "validationOptions": { - "description": "Validation options to use when authenticating a TLS connection. By default, strict validation is used.", - "$ref": "#/definitions/MediaGraphTlsValidationOptions" - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphEndpoint" - }, - {} - ], - "description": "An endpoint that the graph can connect to, which must be connected over TLS/SSL.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphTlsEndpoint" - }, - "MediaGraphCertificateSource": { - "type": "object", - "required": [ - "@type" - ], - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The discriminator for derived types." - } - }, - "description": "Base class for certificate sources." - }, - "MediaGraphTlsValidationOptions": { - "type": "object", - "properties": { - "ignoreHostname": { - "type": "string", - "description": "Boolean value ignoring the host name (common name) during validation." - }, - "ignoreSignature": { - "type": "string", - "description": "Boolean value ignoring the integrity of the certificate chain at the current time." - } - }, - "description": "Options for controlling the authentication of TLS endpoints." - }, - "MediaGraphPemCertificateList": { - "properties": { - "certificates": { - "type": "array", - "description": "PEM formatted public certificates one per entry.", - "items": { - "type": "string" - } - } - }, - "required": [ - "certificates" - ], - "allOf": [ - { - "$ref": "#/definitions/MediaGraphCertificateSource" - }, - {} - ], - "description": "A list of PEM formatted certificates.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphPemCertificateList" - }, - "MediaGraphSink": { - "type": "object", - "required": [ - "@type", - "inputs", - "name" - ], - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The discriminator for derived types." - }, - "name": { - "type": "string", - "description": "Name to be used for the media graph sink." - }, - "inputs": { - "type": "array", - "description": "An array of the names of the other nodes in the media graph, the outputs of which are used as input for this sink node.", - "items": { - "$ref": "#/definitions/MediaGraphNodeInput" - } - } - }, - "description": "Enables a media graph to write media data to a destination outside of the Live Video Analytics IoT Edge module." - }, - "MediaGraphNodeInput": { - "type": "object", - "properties": { - "nodeName": { - "type": "string", - "description": "The name of another node in the media graph, the output of which is used as input to this node." 
- }, - "outputSelectors": { - "type": "array", - "description": "Allows for the selection of particular streams from another node.", - "items": { - "$ref": "#/definitions/MediaGraphOutputSelector" - } - } - }, - "description": "Represents the input to any node in a media graph." - }, - "MediaGraphOutputSelector": { - "properties": { - "property": { - "type": "string", - "description": "The stream property to compare with.", - "enum": [ - "mediaType" - ], - "x-ms-enum": { - "name": "MediaGraphOutputSelectorProperty", - "values": [ - { - "value": "mediaType", - "description": "The stream's MIME type or subtype." - } - ], - "modelAsString": false - } - }, - "operator": { - "type": "string", - "description": "The operator to compare streams by.", - "enum": [ - "is", - "isNot" - ], - "x-ms-enum": { - "name": "MediaGraphOutputSelectorOperator", - "values": [ - { - "value": "is", - "description": "A media type is the same type or a subtype." - }, - { - "value": "isNot", - "description": "A media type is not the same type or a subtype." - } - ], - "modelAsString": false - } - }, - "value": { - "type": "string", - "description": "Value to compare against." - } - }, - "description": "Allows for the selection of particular streams from another node." - }, - "MediaGraphFileSink": { - "properties": { - "filePathPattern": { - "type": "string", - "description": "Absolute file path pattern for creating new files on the Edge device.", - "minLength": 1 - } - }, - "required": [ - "filePathPattern" - ], - "allOf": [ - { - "$ref": "#/definitions/MediaGraphSink" - }, - {} - ], - "description": "Enables a media graph to write/store media (video and audio) to a file on the Edge device.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphFileSink" - }, - "MediaGraphAssetSink": { - "properties": { - "assetNamePattern": { - "type": "string", - "description": "A name pattern when creating new assets." - }, - "segmentLength": { - "type": "string", - "format": "duration", - "example": "PT30S", - "description": "When writing media to an asset, wait until at least this duration of media has been accumulated on the Edge. Expressed in increments of 30 seconds, with a minimum of 30 seconds and a recommended maximum of 5 minutes." - }, - "localMediaCachePath": { - "type": "string", - "description": "Path to a local file system directory for temporary caching of media, before writing to an Asset. Used when the Edge device is temporarily disconnected from Azure." - }, - "localMediaCacheMaximumSizeMiB": { - "type": "string", - "description": "Maximum amount of disk space that can be used for temporary caching of media." - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphSink" - }, - {} - ], - "description": "Enables a graph to record media to an Azure Media Services asset, for subsequent playback.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphAssetSink" - }, - "MediaGraphProcessor": { - "type": "object", - "required": [ - "@type", - "inputs", - "name" - ], - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The discriminator for derived types." - }, - "name": { - "type": "string", - "description": "The name for this processor node." 
- }, - "inputs": { - "type": "array", - "description": "An array of the names of the other nodes in the media graph, the outputs of which are used as input for this processor node.", - "items": { - "$ref": "#/definitions/MediaGraphNodeInput" - } - } - }, - "description": "A node that represents the desired processing of media in a graph. Takes media and/or events as inputs, and emits media and/or event as output." - }, - "MediaGraphMotionDetectionProcessor": { - "properties": { - "sensitivity": { - "type": "string", - "description": "Enumeration that specifies the sensitivity of the motion detection processor.", - "enum": [ - "Low", - "Medium", - "High" - ], - "x-ms-enum": { - "name": "MediaGraphMotionDetectionSensitivity", - "values": [ - { - "value": "Low", - "description": "Low Sensitivity." - }, - { - "value": "Medium", - "description": "Medium Sensitivity." - }, - { - "value": "High", - "description": "High Sensitivity." - } - ], - "modelAsString": true - } - }, - "outputMotionRegion": { - "type": "boolean", - "description": "Indicates whether the processor should detect and output the regions, within the video frame, where motion was detected. Default is true." - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphProcessor" - }, - {} - ], - "description": "A node that accepts raw video as input, and detects if there are moving objects present. If so, then it emits an event, and allows frames where motion was detected to pass through. Other frames are blocked/dropped.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphMotionDetectionProcessor" - }, - "MediaGraphExtensionProcessorBase": { - "properties": { - "endpoint": { - "description": "Endpoint to which this processor should connect.", - "$ref": "#/definitions/MediaGraphEndpoint" - }, - "image": { - "description": "Describes the parameters of the image that is sent as input to the endpoint.", - "$ref": "#/definitions/MediaGraphImage" - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphProcessor" - }, - {} - ], - "description": "Processor that allows for extensions, outside of the Live Video Analytics Edge module, to be integrated into the graph. It is the base class for various different kinds of extension processor types.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphExtensionProcessorBase" - }, - "MediaGraphCognitiveServicesVisionExtension": { - "properties": {}, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphExtensionProcessorBase" - } - ], - "description": "A processor that allows the media graph to send video frames to a Cognitive Services Vision extension. Inference results are relayed to downstream nodes.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphCognitiveServicesVisionExtension" - }, - "MediaGraphGrpcExtension": { - "required": [ - "dataTransfer" - ], - "properties": { - "dataTransfer": { - "description": "How media should be transferred to the inferencing engine.", - "$ref": "#/definitions/MediaGraphGrpcExtensionDataTransfer" - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphExtensionProcessorBase" - }, - {} - ], - "description": "A processor that allows the media graph to send video frames to an external inference container over a gRPC connection. This can be done using shared memory (for high frame rates), or over the network. 
Inference results are relayed to downstream nodes.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphGrpcExtension" - }, - "MediaGraphGrpcExtensionDataTransfer": { - "required": [ - "mode" - ], - "properties": { - "sharedMemorySizeMiB": { - "type": "string", - "description": "The size of the buffer for all in-flight frames in mebibytes if mode is SharedMemory. Should not be specificed otherwise." - }, - "mode": { - "type": "string", - "description": "How frame data should be transmitted to the inferencing engine.", - "enum": [ - "Embedded", - "SharedMemory" - ], - "x-ms-enum": { - "name": "MediaGraphGrpcExtensionDataTransferMode", - "values": [ - { - "value": "Embedded", - "description": "Frames are transferred embedded into the gRPC messages." - }, - { - "value": "SharedMemory", - "description": "Frames are transferred through shared memory." - } - ], - "modelAsString": true - } - } - }, - "description": "Describes how media should be transferred to the inferencing engine.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphGrpcExtension" - }, - "MediaGraphHttpExtension": { - "allOf": [ - { - "$ref": "#/definitions/MediaGraphExtensionProcessorBase" - }, - {} - ], - "description": "A processor that allows the media graph to send video frames (mostly at low frame rates e.g. <5 fps) to an external inference container over an HTTP-based RESTful API. Inference results are relayed to downstream nodes.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphHttpExtension" - }, - "MediaGraphImage": { - "type": "object", - "properties": { - "scale": { - "$ref": "#/definitions/MediaGraphImageScale" - }, - "format": { - "$ref": "#/definitions/MediaGraphImageFormat" - } - }, - "description": "Describes the properties of an image frame." - }, - "MediaGraphImageScale": { - "type": "object", - "properties": { - "mode": { - "type": "string", - "description": "Describes the modes for scaling an input video frame into an image, before it is sent to an inference engine.", - "enum": [ - "PreserveAspectRatio", - "Pad", - "Stretch" - ], - "x-ms-enum": { - "name": "MediaGraphImageScaleMode", - "values": [ - { - "value": "PreserveAspectRatio", - "description": "Use the same aspect ratio as the input frame." - }, - { - "value": "Pad", - "description": "Center pad the input frame to match the given dimensions." - }, - { - "value": "Stretch", - "description": "Stretch input frame to match given dimensions." - } - ], - "modelAsString": true - } - }, - "width": { - "type": "string", - "description": "The desired output width of the image." - }, - "height": { - "type": "string", - "description": "The desired output height of the image." - } - }, - "description": "The scaling mode for the image." - }, - "MediaGraphImageFormat": { - "required": [ - "@type" - ], - "type": "object", - "discriminator": "@type", - "properties": { - "@type": { - "type": "string", - "description": "The discriminator for derived types." - } - }, - "description": "Encoding settings for an image.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormat" - }, - "MediaGraphImageFormatRaw": { - "properties": { - "pixelFormat": { - "type": "string", - "description": "pixel format", - "enum": [ - "Yuv420p", - "Rgb565be", - "Rgb565le", - "Rgb555be", - "Rgb555le", - "Rgb24", - "Bgr24", - "Argb", - "Rgba", - "Abgr", - "Bgra" - ], - "x-ms-enum": { - "name": "MediaGraphImageFormatRawPixelFormat", - "values": [ - { - "value": "Yuv420p", - "description": "Planar YUV 4:2:0, 12bpp, (1 Cr and Cb sample per 2x2 Y samples)." 
- }, - { - "value": "Rgb565be", - "description": "Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian." - }, - { - "value": "Rgb565le", - "description": "Packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian." - }, - { - "value": "Rgb555be", - "description": "Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined." - }, - { - "value": "Rgb555le", - "description": "Packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined." - }, - { - "value": "Rgb24", - "description": "Packed RGB 8:8:8, 24bpp, RGBRGB." - }, - { - "value": "Bgr24", - "description": "Packed RGB 8:8:8, 24bpp, BGRBGR." - }, - { - "value": "Argb", - "description": "Packed ARGB 8:8:8:8, 32bpp, ARGBARGB." - }, - { - "value": "Rgba", - "description": "Packed RGBA 8:8:8:8, 32bpp, RGBARGBA." - }, - { - "value": "Abgr", - "description": "Packed ABGR 8:8:8:8, 32bpp, ABGRABGR." - }, - { - "value": "Bgra", - "description": "Packed BGRA 8:8:8:8, 32bpp, BGRABGRA." - } - ], - "modelAsString": true - } - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphImageFormat" - }, - {} - ], - "description": "Encoding settings for raw images.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormatRaw" - }, - "MediaGraphImageFormatEncoded": { - "properties": { - "encoding": { - "type": "string", - "description": "The different encoding formats that can be used for the image.", - "default": "Jpeg", - "enum": [ - "Jpeg", - "Bmp", - "Png" - ], - "x-ms-enum": { - "name": "MediaGraphImageEncodingFormat", - "values": [ - { - "value": "Jpeg", - "description": "JPEG image format." - }, - { - "value": "Bmp", - "description": "BMP image format." - }, - { - "value": "Png", - "description": "PNG image format." - } - ], - "modelAsString": true - } - }, - "quality": { - "type": "string", - "description": "The image quality (used for JPEG only). Value must be between 0 to 100 (best quality)." - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphImageFormat" - }, - {} - ], - "description": "Allowed formats for the image.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphImageFormatEncoded" - }, - "MediaGraphSignalGateProcessor": { - "properties": { - "activationEvaluationWindow": { - "type": "string", - "example": "PT1.0S", - "description": "The period of time over which the gate gathers input events, before evaluating them." - }, - "activationSignalOffset": { - "type": "string", - "example": "-PT1.0S", - "description": "Signal offset once the gate is activated (can be negative). It is an offset between the time the event is received, and the timestamp of the first media sample (eg. video frame) that is allowed through by the gate." - }, - "minimumActivationTime": { - "type": "string", - "example": "PT1S", - "description": "The minimum period for which the gate remains open, in the absence of subsequent triggers (events)." - }, - "maximumActivationTime": { - "type": "string", - "example": "PT2S", - "description": "The maximum period for which the gate remains open, in the presence of subsequent events." - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphProcessor" - }, - {} - ], - "description": "A signal gate determines when to block (gate) incoming media, and when to allow it through. 
It gathers input events over the activationEvaluationWindow, and determines whether to open or close the gate.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphSignalGateProcessor" - }, - "MediaGraphFrameRateFilterProcessor": { - "properties": { - "maximumFps": { - "type": "string", - "description": "Ensures that the frame rate of the video leaving this processor does not exceed this limit." - } - }, - "allOf": [ - { - "$ref": "#/definitions/MediaGraphProcessor" - }, - {} - ], - "description": "Limits the frame rate on the input video stream based on the maximumFps property.", - "x-ms-discriminator-value": "#Microsoft.Media.MediaGraphFrameRateFilterProcessor" - } - } -} diff --git a/sdk/media/azure-media-lva-edge/swagger/commandOutput.txt b/sdk/media/azure-media-lva-edge/swagger/commandOutput.txt deleted file mode 100644 index 0290e6671f32..000000000000 --- a/sdk/media/azure-media-lva-edge/swagger/commandOutput.txt +++ /dev/null @@ -1,158 +0,0 @@ -AutoRest code generation utility [cli version: 3.0.6247; node: v12.16.1, max-memory: 2048 gb] -(C) 2018 Microsoft Corporation. -https://aka.ms/autorest -NOTE: AutoRest core version selected from configuration: 3.0.6302. - Loading AutoRest core 'C:\Users\hivyas\.autorest\@autorest_core@3.0.6302\node_modules\@autorest\core\dist' (3.0.6302) - Loading AutoRest extension '@autorest/python' (5.1.0-preview.7->5.1.0-preview.7) - Loading AutoRest extension '@autorest/modelerfour' (4.15.400->4.15.400) - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphTopologyListRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphTopologyGetRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphTopologyDeleteRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceListRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceGetRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceActivateRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceDeActivateRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphInstanceDeleteRequest' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphUnsecuredEndpoint' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphCognitiveServicesVisionExtension' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. - -WARNING (PreCheck/AllOfWhenYouMeantRef): Schema 'MediaGraphHttpExtension' is using an 'allOf' instead of a $ref. This creates a wasteful anonymous type when generating code. 
- -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphInstanceCollection' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphTopologyCollection' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphRtspSource' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphIoTHubMessageSource' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphIoTHubMessageSink' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphUsernamePasswordCredentials' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphHttpHeaderCredentials' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphUnsecuredEndpoint' with an undefined type and 'allOf'/'anyOf'/'oneOf' is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphTlsEndpoint' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphPemCertificateList' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphOutputSelector' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphFileSink' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphAssetSink' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphMotionDetectionProcessor' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphExtensionProcessorBase' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphCognitiveServicesVisionExtension' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphGrpcExtension' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphGrpcExtensionDataTransfer' with an undefined type and decalared properties is a bit ambigious. 
This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphHttpExtension' with an undefined type and 'allOf'/'anyOf'/'oneOf' is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphImageFormatRaw' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphImageFormatEncoded' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphSignalGateProcessor' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/SchemaMissingType): The schema 'MediaGraphFrameRateFilterProcessor' with an undefined type and decalared properties is a bit ambigious. This has been auto-corrected to 'type:object' - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphRtspSource' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphIoTHubMessageSource' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphIoTHubMessageSink' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphUsernamePasswordCredentials' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphHttpHeaderCredentials' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphTlsEndpoint' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphPemCertificateList' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphFileSink' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphAssetSink' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphMotionDetectionProcessor' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphExtensionProcessorBase' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphGrpcExtension' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphImageFormatRaw' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphImageFormatEncoded' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphSignalGateProcessor' has an allOf list with an empty object schema as a parent, removing it. - -WARNING (PreCheck/EmptyParentSchemaWarning): Schema 'MediaGraphFrameRateFilterProcessor' has an allOf list with an empty object schema as a parent, removing it. 
- -WARNING (PreCheck/CheckDuplicateSchemas): Checking for duplicate schemas, this could take a (long) while. Run with --verbose for more detail. - -WARNING (Modeler/MissingType): The schema 'components·109p5kc·schemas·mediagraphrtspsource·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·109p5kc·schemas·mediagraphrtspsource·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1af9g39·schemas·mediagraphiothubmessagesource·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1af9g39·schemas·mediagraphiothubmessagesource·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1jngw4h·schemas·mediagraphiothubmessagesink·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1jngw4h·schemas·mediagraphiothubmessagesink·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1mxkvbd·schemas·mediagraphusernamepasswordcredentials·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1mxkvbd·schemas·mediagraphusernamepasswordcredentials·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1uqp1b7·schemas·mediagraphhttpheadercredentials·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1uqp1b7·schemas·mediagraphhttpheadercredentials·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·q7dsz6·schemas·mediagraphtlsendpoint·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·q7dsz6·schemas·mediagraphtlsendpoint·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·7b4k0z·schemas·mediagraphpemcertificatelist·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·7b4k0z·schemas·mediagraphpemcertificatelist·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1nh92cj·schemas·mediagraphfilesink·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1nh92cj·schemas·mediagraphfilesink·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·z5bgs5·schemas·mediagraphassetsink·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·z5bgs5·schemas·mediagraphassetsink·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1vu24mc·schemas·mediagraphmotiondetectionprocessor·allof·1' has no type or format information whatsoever. 
Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1vu24mc·schemas·mediagraphmotiondetectionprocessor·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1axip85·schemas·mediagraphextensionprocessorbase·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1axip85·schemas·mediagraphextensionprocessorbase·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1yl8gs2·schemas·mediagraphgrpcextension·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1yl8gs2·schemas·mediagraphgrpcextension·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1k6pka5·schemas·mediagraphimageformatraw·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1k6pka5·schemas·mediagraphimageformatraw·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·nnu6mb·schemas·mediagraphimageformatencoded·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·nnu6mb·schemas·mediagraphimageformatencoded·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·dx5boa·schemas·mediagraphsignalgateprocessor·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·dx5boa·schemas·mediagraphsignalgateprocessor·allof·1 - -WARNING (Modeler/MissingType): The schema 'components·1hcm6ag·schemas·mediagraphframeratefilterprocessor·allof·1' has no type or format information whatsoever. Location: - file:///C:/azure-sdk-for-python/sdk/api-learn/azure-learnappconfig/swagger/appconfiguration.json#/components/schemas/components·1hcm6ag·schemas·mediagraphframeratefilterprocessor·allof·1 -Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? Terminate batch job (Y/N)? 
\ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/tests/_shared/asynctestcase.py b/sdk/media/azure-media-lva-edge/tests/_shared/asynctestcase.py deleted file mode 100644 index 53b2dcb4ba92..000000000000 --- a/sdk/media/azure-media-lva-edge/tests/_shared/asynctestcase.py +++ /dev/null @@ -1,79 +0,0 @@ -import asyncio -import functools -import os - -from azure_devtools.scenario_tests.utilities import trim_kwargs_from_test_function -from devtools_testutils.azure_testcase import _is_autorest_v3 - -from .testcase import AppConfigTestCase - -class AsyncAppConfigTestCase(AppConfigTestCase): - def __init__(self, *args, **kwargs): - super(AppConfigTestCase, self).__init__(*args, **kwargs) - - class AsyncFakeCredential(object): - # fake async credential - async def get_token(self, *scopes, **kwargs): - return AccessToken('fake_token', 2527537086) - - async def close(self): - pass - - def create_basic_client(self, client_class, **kwargs): - # This is the patch for creating client using aio identity - - tenant_id = os.environ.get("AZURE_TENANT_ID", None) - client_id = os.environ.get("AZURE_CLIENT_ID", None) - secret = os.environ.get("AZURE_CLIENT_SECRET", None) - - if tenant_id and client_id and secret and self.is_live: - if _is_autorest_v3(client_class): - # Create azure-identity class using aio credential - from azure.identity.aio import ClientSecretCredential - credentials = ClientSecretCredential( - tenant_id=tenant_id, - client_id=client_id, - client_secret=secret - ) - else: - # Create msrestazure class - from msrestazure.azure_active_directory import ServicePrincipalCredentials - credentials = ServicePrincipalCredentials( - tenant=tenant_id, - client_id=client_id, - secret=secret - ) - else: - if _is_autorest_v3(client_class): - credentials = self.AsyncFakeCredential() - #credentials = self.settings.get_azure_core_credentials() - else: - credentials = self.settings.get_credentials() - - # Real client creation - # FIXME decide what is the final argument for that - # if self.is_playback(): - # kwargs.setdefault("polling_interval", 0) - if _is_autorest_v3(client_class): - kwargs.setdefault("logging_enable", True) - client = client_class( - credential=credentials, - **kwargs - ) - else: - client = client_class( - credentials=credentials, - **kwargs - ) - - if self.is_playback(): - try: - client._config.polling_interval = 0 # FIXME in azure-mgmt-core, make this a kwargs - except AttributeError: - pass - - if hasattr(client, "config"): # Autorest v2 - if self.is_playback(): - client.config.long_running_operation_timeout = 0 - client.config.enable_http_logger = True - return client diff --git a/sdk/media/azure-media-lva-edge/tests/_shared/testcase.py b/sdk/media/azure-media-lva-edge/tests/_shared/testcase.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/sdk/media/azure-media-lva-edge/tests/conftest.py b/sdk/media/azure-media-lva-edge/tests/conftest.py deleted file mode 100644 index c36aaed14908..000000000000 --- a/sdk/media/azure-media-lva-edge/tests/conftest.py +++ /dev/null @@ -1,25 +0,0 @@ -# -------------------------------------------------------------------------- -# -# Copyright (c) Microsoft Corporation. All rights reserved. 
-# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. -# -# -------------------------------------------------------------------------- diff --git a/sdk/media/azure-media-lva-edge/tests/test_app_config.py b/sdk/media/azure-media-lva-edge/tests/test_app_config.py deleted file mode 100644 index 5871ed8eef2f..000000000000 --- a/sdk/media/azure-media-lva-edge/tests/test_app_config.py +++ /dev/null @@ -1 +0,0 @@ -import pytest diff --git a/sdk/media/azure-media-nspkg/CHANGELOG.md b/sdk/media/azure-media-nspkg/CHANGELOG.md new file mode 100644 index 000000000000..b113bcbe0b14 --- /dev/null +++ b/sdk/media/azure-media-nspkg/CHANGELOG.md @@ -0,0 +1,3 @@ +# Release History + +## 1.0.0 (2020-12-10) diff --git a/sdk/media/azure-media-nspkg/MANIFEST.in b/sdk/media/azure-media-nspkg/MANIFEST.in new file mode 100644 index 000000000000..ce3c2ae7c20e --- /dev/null +++ b/sdk/media/azure-media-nspkg/MANIFEST.in @@ -0,0 +1,3 @@ +include *.md +include azure/__init__.py +include azure/media/__init__.py diff --git a/sdk/media/azure-media-nspkg/README.md b/sdk/media/azure-media-nspkg/README.md new file mode 100644 index 000000000000..c8f445a58562 --- /dev/null +++ b/sdk/media/azure-media-nspkg/README.md @@ -0,0 +1,12 @@ +# Microsoft Azure SDK for Python + +This is the Microsoft Azure Security Services namespace package. + +This package is not intended to be installed directly by the end user. + +Since version 3.0, this is Python 2 package only, Python 3.x SDKs will use `PEP420 ` as namespace package strategy. + +It provides the necessary files for other packages to extend the azure.security namespace. 
+ + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-python%2Fsdk%2Fsecurity%2Fazure-security-nspkg%2FREADME.png) \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/__init__.py b/sdk/media/azure-media-nspkg/azure/__init__.py similarity index 80% rename from sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/__init__.py rename to sdk/media/azure-media-nspkg/azure/__init__.py index 5960c353a898..69e3be50dac4 100644 --- a/sdk/media/azure-media-lva-edge/azure/media/lva/edge/_generated/__init__.py +++ b/sdk/media/azure-media-nspkg/azure/__init__.py @@ -1 +1 @@ -__path__ = __import__('pkgutil').extend_path(__path__, __name__) # type: ignore \ No newline at end of file +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/sdk/media/azure-media-nspkg/azure/media/__init__.py b/sdk/media/azure-media-nspkg/azure/media/__init__.py new file mode 100644 index 000000000000..69e3be50dac4 --- /dev/null +++ b/sdk/media/azure-media-nspkg/azure/media/__init__.py @@ -0,0 +1 @@ +__path__ = __import__('pkgutil').extend_path(__path__, __name__) diff --git a/sdk/media/azure-media-nspkg/sdk_packaging.toml b/sdk/media/azure-media-nspkg/sdk_packaging.toml new file mode 100644 index 000000000000..e7687fdae93b --- /dev/null +++ b/sdk/media/azure-media-nspkg/sdk_packaging.toml @@ -0,0 +1,2 @@ +[packaging] +auto_update = false \ No newline at end of file diff --git a/sdk/media/azure-media-lva-edge/setup.cfg b/sdk/media/azure-media-nspkg/setup.cfg similarity index 100% rename from sdk/media/azure-media-lva-edge/setup.cfg rename to sdk/media/azure-media-nspkg/setup.cfg diff --git a/sdk/media/azure-media-nspkg/setup.py b/sdk/media/azure-media-nspkg/setup.py new file mode 100644 index 000000000000..c4d7f5a609a3 --- /dev/null +++ b/sdk/media/azure-media-nspkg/setup.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python + +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+#-------------------------------------------------------------------------- +from setuptools import setup + +setup( + name='azure-media-nspkg', + version='1.0.0', + description='Microsoft Azure Media Namespace Package [Internal]', + long_description=open('README.md', 'r').read(), + license='MIT License', + author='Microsoft Corporation', + author_email='azpysdkhelp@microsoft.com', + url='https://github.com/Azure/azure-sdk-for-python', + classifiers=[ + 'Development Status :: 5 - Production/Stable', + 'Programming Language :: Python', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'License :: OSI Approved :: MIT License', + ], + zip_safe=False, + packages=[ + 'azure.media' + ], + install_requires=[ + 'azure-nspkg>=3.0.0', + ] +) diff --git a/sdk/media/ci.yml b/sdk/media/ci.yml index 2d63019f2b80..7abc85c7831c 100644 --- a/sdk/media/ci.yml +++ b/sdk/media/ci.yml @@ -30,6 +30,4 @@ extends: Artifacts: - name: azure_mgmt_media safeName: azuremgmtmedia - - name: azure_media_lva_edge - safeName: azuremedialvaedge From 45856f7e0fde3ecdab7769ef75900869727d51ad Mon Sep 17 00:00:00 2001 From: Laurent Mazuel Date: Thu, 10 Dec 2020 16:56:55 -0800 Subject: [PATCH 3/9] Update ci.yml --- sdk/media/ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sdk/media/ci.yml b/sdk/media/ci.yml index 7abc85c7831c..c52044500767 100644 --- a/sdk/media/ci.yml +++ b/sdk/media/ci.yml @@ -30,4 +30,6 @@ extends: Artifacts: - name: azure_mgmt_media safeName: azuremgmtmedia + - name: azure_media_nspkg + safeName: azuremedianspkg From d276bb1ab69d65ee0c3ee2790c5dc31f58f2a0bc Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 11 Dec 2020 08:38:02 -0800 Subject: [PATCH 4/9] changing security to media --- sdk/media/azure-media-nspkg/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/media/azure-media-nspkg/README.md b/sdk/media/azure-media-nspkg/README.md index c8f445a58562..4c7096141716 100644 --- a/sdk/media/azure-media-nspkg/README.md +++ b/sdk/media/azure-media-nspkg/README.md @@ -1,12 +1,12 @@ # Microsoft Azure SDK for Python -This is the Microsoft Azure Security Services namespace package. +This is the Microsoft Azure Media Services namespace package. This package is not intended to be installed directly by the end user. Since version 3.0, this is Python 2 package only, Python 3.x SDKs will use `PEP420 ` as namespace package strategy. -It provides the necessary files for other packages to extend the azure.security namespace. +It provides the necessary files for other packages to extend the azure.media namespace.
![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-python%2Fsdk%2Fsecurity%2Fazure-security-nspkg%2FREADME.png) \ No newline at end of file From fc0c01feb8790b4f45ec9fa133788ead9c34dbbf Mon Sep 17 00:00:00 2001 From: Laurent Mazuel Date: Fri, 11 Dec 2020 08:45:37 -0800 Subject: [PATCH 5/9] Update README.md --- sdk/media/azure-media-nspkg/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/media/azure-media-nspkg/README.md b/sdk/media/azure-media-nspkg/README.md index 4c7096141716..3a31e91063e1 100644 --- a/sdk/media/azure-media-nspkg/README.md +++ b/sdk/media/azure-media-nspkg/README.md @@ -9,4 +9,4 @@ Since version 3.0, this is Python 2 package only, Python 3.x SDKs will use `PEP4 It provides the necessary files for other packages to extend the azure.media namespace. -![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-python%2Fsdk%2Fsecurity%2Fazure-security-nspkg%2FREADME.png) \ No newline at end of file +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-python%2Fsdk%2Fmedia%2Fazure-media-nspkg%2FREADME.png) From be746da0a7dc3c214ff760e41ad145cddd853859 Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 11 Dec 2020 09:05:38 -0800 Subject: [PATCH 6/9] adding to docsettings --- eng/.docsettings.yml | 1 + sdk/media/azure-media-nspkg/CHANGELOG.md | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/eng/.docsettings.yml b/eng/.docsettings.yml index dcae4c3970f8..f18b089da110 100644 --- a/eng/.docsettings.yml +++ b/eng/.docsettings.yml @@ -103,6 +103,7 @@ known_content_issues: - ['sdk/monitor/microsoft-opentelemetry-exporter-azuremonitor/swagger/README.md', '#4554'] - ['sdk/monitor/microsoft-opentelemetry-exporter-azuremonitor/README.md', '#4554'] - ['sdk/digitaltwins/azure-digitaltwins-core/swagger/README.md', '#4554'] + - ['sdk/media/azure-media-nspkg/README.md', '#4554'] # nspckg and common. 
- ['sdk/appconfiguration/azure-appconfiguration/README.md', 'nspkg and common'] diff --git a/sdk/media/azure-media-nspkg/CHANGELOG.md b/sdk/media/azure-media-nspkg/CHANGELOG.md index b113bcbe0b14..db70cf40ee76 100644 --- a/sdk/media/azure-media-nspkg/CHANGELOG.md +++ b/sdk/media/azure-media-nspkg/CHANGELOG.md @@ -1,3 +1,5 @@ # Release History -## 1.0.0 (2020-12-10) +## 1.0.b1 + +Initial release From 615ed350083705c7640f5cefad20b2ec085096fc Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 11 Dec 2020 10:10:48 -0800 Subject: [PATCH 7/9] updating changelog version --- sdk/media/azure-media-nspkg/CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/media/azure-media-nspkg/CHANGELOG.md b/sdk/media/azure-media-nspkg/CHANGELOG.md index db70cf40ee76..d8ac58ec4024 100644 --- a/sdk/media/azure-media-nspkg/CHANGELOG.md +++ b/sdk/media/azure-media-nspkg/CHANGELOG.md @@ -1,5 +1,5 @@ # Release History -## 1.0.b1 +## 1.0.0 Initial release From ed9a43fc2ad134b4431bb8910adaef63c0830468 Mon Sep 17 00:00:00 2001 From: hivyas Date: Fri, 11 Dec 2020 10:28:02 -0800 Subject: [PATCH 8/9] adding unreleased tag --- sdk/media/azure-media-nspkg/CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/media/azure-media-nspkg/CHANGELOG.md b/sdk/media/azure-media-nspkg/CHANGELOG.md index d8ac58ec4024..b73c9a0d67d2 100644 --- a/sdk/media/azure-media-nspkg/CHANGELOG.md +++ b/sdk/media/azure-media-nspkg/CHANGELOG.md @@ -1,5 +1,5 @@ # Release History -## 1.0.0 +## 1.0.0 (Unreleased) Initial release From 1a69f1c10dd47c944a42c2e7cef37db7ad311650 Mon Sep 17 00:00:00 2001 From: Laurent Mazuel Date: Fri, 11 Dec 2020 10:32:55 -0800 Subject: [PATCH 9/9] Update CHANGELOG.md --- sdk/media/azure-media-nspkg/CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/media/azure-media-nspkg/CHANGELOG.md b/sdk/media/azure-media-nspkg/CHANGELOG.md index b73c9a0d67d2..efe8edc2b76b 100644 --- a/sdk/media/azure-media-nspkg/CHANGELOG.md +++ b/sdk/media/azure-media-nspkg/CHANGELOG.md @@ -1,5 +1,5 @@ # Release History -## 1.0.0 (Unreleased) +## 1.0.0 (2020-12-11)
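A note on the namespace mechanism these patches rely on: every distribution that participates in the azure.media namespace ships the one-line pkgutil shim shown above in azure/__init__.py and azure/media/__init__.py, and azure-media-nspkg exists only to provide those shims. The sketch below is illustrative and not part of any patch in this series; it assumes azure-media-nspkg plus at least one distribution contributing an azure.media.* subpackage are installed, and no specific subpackage name is implied.

```python
# Minimal sketch (assumptions: azure-media-nspkg and at least one azure.media.*
# distribution are installed in the current environment).
import pkgutil

import azure.media

# Each participating package's azure/__init__.py and azure/media/__init__.py
# contain only:
#   __path__ = __import__('pkgutil').extend_path(__path__, __name__)
# so azure.media.__path__ accumulates one directory per installed distribution.
print(list(azure.media.__path__))

# Subpackages contributed by different distributions are discoverable side by side.
for module_info in pkgutil.iter_modules(azure.media.__path__, prefix="azure.media."):
    print(module_info.name)
```

The zip_safe=False flag and the azure-nspkg>=3.0.0 dependency in the nspkg setup.py keep this pkgutil-style scheme working for Python 2 installs, while Python 3-only packages can drop the shims and rely on implicit PEP 420 namespace packages, which is what the README above refers to.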