feat(python): added tests
v.kozyar committed Feb 22, 2024
1 parent e7388f4 commit a1f6473
Showing 7 changed files with 361 additions and 102 deletions.
6 changes: 3 additions & 3 deletions flipt-python/scripts.py
@@ -3,7 +3,7 @@

 def test():
     """
-    Run all unittests. Equivalent to:
-    `poetry run python -m unittest tests`
+    Run all tests. Equivalent to:
+    `poetry run pytest tests`
     """
-    subprocess.run(["python", "-m", "unittest", "tests"])
+    subprocess.run(["python", "-m", "pytest", "tests"])
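The runner change keeps the subprocess call. A hedged alternative, should shelling out ever prove awkward, is to invoke pytest in-process via `pytest.main` (a real pytest API; the exit-code forwarding shown is an assumption about how the Poetry script should behave, not part of this commit):

# Sketch only — not part of this commit. pytest.main runs the suite
# in-process and returns an exit code, which we forward so that
# `poetry run test` fails in CI when the tests fail.
import sys

import pytest


def test() -> None:
    """Run all tests. Equivalent to `poetry run pytest tests`."""
    sys.exit(pytest.main(["tests"]))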
99 changes: 0 additions & 99 deletions flipt-python/tests/__init__.py
@@ -1,99 +0,0 @@
import os
import unittest
from flipt import FliptClient
from flipt.evaluation import BatchEvaluationRequest, EvaluationRequest
from flipt.authentication import ClientTokenAuthentication


class TestFliptEvaluationClient(unittest.TestCase):
def setUp(self) -> None:
flipt_url = os.environ.get("FLIPT_URL")
if flipt_url is None:
raise Exception("FLIPT_URL not set")

auth_token = os.environ.get("FLIPT_AUTH_TOKEN")
if auth_token is None:
raise Exception("FLIPT_AUTH_TOKEN not set")

self.flipt_client = FliptClient(
url=flipt_url, authentication=ClientTokenAuthentication(auth_token)
)

def test_variant(self):
variant = self.flipt_client.evaluation.variant(
EvaluationRequest(
namespace_key="default",
flag_key="flag1",
entity_id="entity",
context={"fizz": "buzz"},
)
)
self.assertTrue(variant.match)
self.assertEqual("flag1", variant.flag_key)
self.assertEqual("variant1", variant.variant_key)
self.assertEqual("MATCH_EVALUATION_REASON", variant.reason)
self.assertIn("segment1", variant.segment_keys)

def test_boolean(self):
boolean = self.flipt_client.evaluation.boolean(
EvaluationRequest(
namespace_key="default",
flag_key="flag_boolean",
entity_id="entity",
context={"fizz": "buzz"},
)
)
self.assertTrue(boolean.enabled)
self.assertEqual("flag_boolean", boolean.flag_key)
self.assertEqual("MATCH_EVALUATION_REASON", boolean.reason)

def test_batch(self):
batch = self.flipt_client.evaluation.batch(
BatchEvaluationRequest(
requests=[
EvaluationRequest(
namespace_key="default",
flag_key="flag1",
entity_id="entity",
context={"fizz": "buzz"},
),
EvaluationRequest(
namespace_key="default",
flag_key="flag_boolean",
entity_id="entity",
context={"fizz": "buzz"},
),
EvaluationRequest(
namespace_key="default",
flag_key="notfound",
entity_id="entity",
context={"fizz": "buzz"},
),
]
)
)

self.assertEqual(3, len(batch.responses))

# Variant
self.assertEqual("VARIANT_EVALUATION_RESPONSE_TYPE", batch.responses[0].type)
variant = batch.responses[0].variant_response
self.assertTrue(variant.match)
self.assertEqual("flag1", variant.flag_key)
self.assertEqual("variant1", variant.variant_key)
self.assertEqual("MATCH_EVALUATION_REASON", variant.reason)
self.assertIn("segment1", variant.segment_keys)

# Boolean
self.assertEqual("BOOLEAN_EVALUATION_RESPONSE_TYPE", batch.responses[1].type)
boolean = batch.responses[1].boolean_response
self.assertTrue(boolean.enabled)
self.assertEqual("flag_boolean", boolean.flag_key)
self.assertEqual("MATCH_EVALUATION_REASON", boolean.reason)

# Error
self.assertEqual("ERROR_EVALUATION_RESPONSE_TYPE", batch.responses[2].type)
error = batch.responses[2].error_response
self.assertEqual("notfound", error.flag_key)
self.assertEqual("default", error.namespace_key)
self.assertEqual("NOT_FOUND_ERROR_EVALUATION_REASON", error.reason)
33 changes: 33 additions & 0 deletions flipt-python/tests/conftest.py
@@ -0,0 +1,33 @@
import os

import pytest

from flipt import AsyncFliptClient, FliptClient
from flipt.authentication import ClientTokenAuthentication


@pytest.fixture(scope='session')
def flipt_url() -> str:
flipt_url = os.environ.get("FLIPT_URL")
if flipt_url is None:
raise Exception("FLIPT_URL not set")
return flipt_url


@pytest.fixture(scope='session')
def flipt_auth_token() -> str:
auth_token = os.environ.get("FLIPT_AUTH_TOKEN")
if auth_token is None:
raise Exception("FLIPT_AUTH_TOKEN not set")

return auth_token


@pytest.fixture(scope='session')
def sync_flipt_client(flipt_url, flipt_auth_token):
return FliptClient(url=flipt_url, authentication=ClientTokenAuthentication(flipt_auth_token))


@pytest.fixture()
def async_flipt_client(flipt_url, flipt_auth_token):
return AsyncFliptClient(url=flipt_url, authentication=ClientTokenAuthentication(flipt_auth_token))
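Note the scoping: the sync client is session-scoped and shared across the whole run, while the async client is function-scoped, so each test gets a fresh instance — a common precaution when tests may run on separate event loops. Running the suite locally requires both environment variables; a hypothetical setup, assuming a Flipt server on localhost (the URL and token values below are placeholders, not from the commit):

# Hypothetical local setup — export these before `poetry run test`,
# or set defaults in an untracked conftest. Values are placeholders.
import os

os.environ.setdefault("FLIPT_URL", "http://localhost:8080")
os.environ.setdefault("FLIPT_AUTH_TOKEN", "dev-token")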
Empty file.
33 changes: 33 additions & 0 deletions flipt-python/tests/evaluation/conftest.py
@@ -0,0 +1,33 @@
from http import HTTPStatus

import pytest


@pytest.fixture(params=[{}, {'message': 'some error'}])
def _mock_variant_response_error(httpx_mock, flipt_url, request):
httpx_mock.add_response(
method="POST",
url=f'{flipt_url}/evaluate/v1/variant',
status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
json=request.param,
)


@pytest.fixture(params=[{}, {'message': 'some error'}])
def _mock_boolean_response_error(httpx_mock, flipt_url, request):
httpx_mock.add_response(
method="POST",
url=f'{flipt_url}/evaluate/v1/boolean',
status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
json=request.param,
)


@pytest.fixture(params=[{}, {'message': 'some error'}])
def _mock_batch_response_error(httpx_mock, flipt_url, request):
httpx_mock.add_response(
method="POST",
url=f'{flipt_url}/evaluate/v1/batch',
status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
json=request.param,
)
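Because each fixture is parametrized with two payloads (an empty body and one carrying a message), every test that pulls one in runs twice, once per error shape. A success-path counterpart would use the same pytest-httpx call; a sketch, with the caveat that the JSON field names below are illustrative guesses, not the documented Flipt response schema:

from http import HTTPStatus

import pytest


@pytest.fixture()
def _mock_variant_response_ok(httpx_mock, flipt_url):
    # Hypothetical happy-path mock; the response field names are
    # assumptions, not the verified Flipt API schema.
    httpx_mock.add_response(
        method="POST",
        url=f"{flipt_url}/evaluate/v1/variant",
        status_code=HTTPStatus.OK,
        json={"match": True, "flagKey": "flag1", "variantKey": "variant1"},
    )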
146 changes: 146 additions & 0 deletions flipt-python/tests/evaluation/test_async_client.py
@@ -0,0 +1,146 @@
import pytest

from flipt.evaluation import BatchEvaluationRequest, EvaluationRequest
from flipt.exceptions import FliptApiError


async def test_variant(async_flipt_client):
variant = await async_flipt_client.evaluation.variant(
EvaluationRequest(
namespace_key="default",
flag_key="flag1",
entity_id="entity",
context={"fizz": "buzz"},
)
)

assert variant.match
assert variant.flag_key == 'flag1'
assert variant.variant_key == 'variant1'
assert variant.reason == 'MATCH_EVALUATION_REASON'
assert 'segment1' in variant.segment_keys


@pytest.mark.usefixtures('_mock_variant_response_error')
async def test_evaluate_variant_error(async_flipt_client):
with pytest.raises(FliptApiError):
await async_flipt_client.evaluation.variant(
EvaluationRequest(
namespace_key="default",
flag_key="flag1",
entity_id="entity",
context={"fizz": "buzz"},
)
)


async def test_boolean(async_flipt_client):
boolean = await async_flipt_client.evaluation.boolean(
EvaluationRequest(
namespace_key="default",
flag_key="flag_boolean",
entity_id="entity",
context={"fizz": "buzz"},
)
)

assert boolean.enabled
assert boolean.flag_key == 'flag_boolean'
assert boolean.reason == 'MATCH_EVALUATION_REASON'


@pytest.mark.usefixtures('_mock_boolean_response_error')
async def test_evaluate_boolean_error(async_flipt_client):
with pytest.raises(FliptApiError):
await async_flipt_client.evaluation.boolean(
EvaluationRequest(
namespace_key="default",
flag_key="flag_boolean",
entity_id="entity",
context={"fizz": "buzz"},
)
)


async def test_batch(async_flipt_client):
batch = await async_flipt_client.evaluation.batch(
BatchEvaluationRequest(
requests=[
EvaluationRequest(
namespace_key="default",
flag_key="flag1",
entity_id="entity",
context={"fizz": "buzz"},
),
EvaluationRequest(
namespace_key="default",
flag_key="flag_boolean",
entity_id="entity",
context={"fizz": "buzz"},
),
EvaluationRequest(
namespace_key="default",
flag_key="notfound",
entity_id="entity",
context={"fizz": "buzz"},
),
]
)
)

assert len(batch.responses) == 3

# Variant
assert batch.responses[0].type == "VARIANT_EVALUATION_RESPONSE_TYPE"

variant = batch.responses[0].variant_response
assert variant.match
assert variant.flag_key == "flag1"
assert variant.variant_key == "variant1"
assert variant.reason == "MATCH_EVALUATION_REASON"
assert 'segment1' in variant.segment_keys

# Boolean
assert batch.responses[1].type == 'BOOLEAN_EVALUATION_RESPONSE_TYPE'

boolean = batch.responses[1].boolean_response
assert boolean.enabled
assert boolean.flag_key == "flag_boolean"
assert boolean.reason == "MATCH_EVALUATION_REASON"

# Error
assert batch.responses[2].type == 'ERROR_EVALUATION_RESPONSE_TYPE'

error = batch.responses[2].error_response
assert error.flag_key == "notfound"
assert error.namespace_key == "default"
assert error.reason == "NOT_FOUND_ERROR_EVALUATION_REASON"


@pytest.mark.usefixtures('_mock_batch_response_error')
async def test_evaluate_batch_error(async_flipt_client):
with pytest.raises(FliptApiError):
await async_flipt_client.evaluation.batch(
BatchEvaluationRequest(
requests=[
EvaluationRequest(
namespace_key="default",
flag_key="flag1",
entity_id="entity",
context={"fizz": "buzz"},
),
EvaluationRequest(
namespace_key="default",
flag_key="flag_boolean",
entity_id="entity",
context={"fizz": "buzz"},
),
EvaluationRequest(
namespace_key="default",
flag_key="notfound",
entity_id="entity",
context={"fizz": "buzz"},
),
]
)
)
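None of these coroutine tests carry an explicit asyncio marker, which suggests the project runs pytest-asyncio (or a similar plugin) in auto mode — an assumption, since the pytest configuration is not part of this diff. Without auto mode, one way to opt a module in explicitly is a module-level marker:

# Only needed if asyncio_mode is not set to "auto" in the pytest config.
import pytest

pytestmark = pytest.mark.asyncio  # applies the marker to every test in this module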