Diffstat
 -rw-r--r--  tests/bot/cogs/sync/test_base.py   |   4
 -rw-r--r--  tests/bot/cogs/test_information.py |  10
 -rw-r--r--  tests/bot/cogs/test_snekbox.py     | 368
 -rw-r--r--  tests/bot/test_converters.py       |   2
 -rw-r--r--  tests/bot/test_utils.py            |  15
 5 files changed, 376 insertions, 23 deletions
diff --git a/tests/bot/cogs/sync/test_base.py b/tests/bot/cogs/sync/test_base.py
index d17a27409..fe0594efe 100644
--- a/tests/bot/cogs/sync/test_base.py
+++ b/tests/bot/cogs/sync/test_base.py
@@ -82,7 +82,7 @@ class SyncerSendPromptTests(unittest.IsolatedAsyncioTestCase):
                 mock_()

                 await self.syncer._send_prompt()
-                method.assert_called_once_with(constants.Channels.devcore)
+                method.assert_called_once_with(constants.Channels.dev_core)

     async def test_send_prompt_returns_None_if_channel_fetch_fails(self):
         """None should be returned if there's an HTTPException when fetching the channel."""
@@ -130,7 +130,7 @@ class SyncerConfirmationTests(unittest.IsolatedAsyncioTestCase):
     def setUp(self):
         self.bot = helpers.MockBot()
         self.syncer = TestSyncer(self.bot)
-        self.core_dev_role = helpers.MockRole(id=constants.Roles.core_developer)
+        self.core_dev_role = helpers.MockRole(id=constants.Roles.core_developers)

     @staticmethod
     def get_message_reaction(emoji):
diff --git a/tests/bot/cogs/test_information.py b/tests/bot/cogs/test_information.py
index f5e937356..5693d2946 100644
--- a/tests/bot/cogs/test_information.py
+++ b/tests/bot/cogs/test_information.py
@@ -19,7 +19,7 @@ class InformationCogTests(unittest.TestCase):

     @classmethod
     def setUpClass(cls):
-        cls.moderator_role = helpers.MockRole(name="Moderator", id=constants.Roles.moderator)
+        cls.moderator_role = helpers.MockRole(name="Moderator", id=constants.Roles.moderators)

     def setUp(self):
         """Sets up fresh objects for each test."""
@@ -521,7 +521,7 @@ class UserCommandTests(unittest.TestCase):
         """A regular user should not be able to use this command outside of bot-commands."""
         constants.MODERATION_ROLES = [self.moderator_role.id]
         constants.STAFF_ROLES = [self.moderator_role.id]
-        constants.Channels.bot = 50
+        constants.Channels.bot_commands = 50

         ctx = helpers.MockContext(author=self.author, channel=helpers.MockTextChannel(id=100))

@@ -533,7 +533,7 @@ class UserCommandTests(unittest.TestCase):
     def test_regular_user_may_use_command_in_bot_commands_channel(self, create_embed, constants):
         """A regular user should be allowed to use `!user` targeting themselves in bot-commands."""
         constants.STAFF_ROLES = [self.moderator_role.id]
-        constants.Channels.bot = 50
+        constants.Channels.bot_commands = 50

         ctx = helpers.MockContext(author=self.author, channel=helpers.MockTextChannel(id=50))

@@ -546,7 +546,7 @@ class UserCommandTests(unittest.TestCase):
     def test_regular_user_can_explicitly_target_themselves(self, create_embed, constants):
         """A user should target itself with `!user` when a `user` argument was not provided."""
         constants.STAFF_ROLES = [self.moderator_role.id]
-        constants.Channels.bot = 50
+        constants.Channels.bot_commands = 50

         ctx = helpers.MockContext(author=self.author, channel=helpers.MockTextChannel(id=50))

@@ -559,7 +559,7 @@ class UserCommandTests(unittest.TestCase):
     def test_staff_members_can_bypass_channel_restriction(self, create_embed, constants):
         """Staff members should be able to bypass the bot-commands channel restriction."""
         constants.STAFF_ROLES = [self.moderator_role.id]
-        constants.Channels.bot = 50
+        constants.Channels.bot_commands = 50

         ctx = helpers.MockContext(author=self.moderator, channel=helpers.MockTextChannel(id=200))
diff --git a/tests/bot/cogs/test_snekbox.py b/tests/bot/cogs/test_snekbox.py
new file mode 100644
index 000000000..985bc66a1
--- /dev/null
+++ b/tests/bot/cogs/test_snekbox.py
@@ -0,0 +1,368 @@
+import asyncio
+import logging
+import unittest
+from functools import partial
+from unittest.mock import MagicMock, Mock, call, patch
+
+from bot.cogs import snekbox
+from bot.cogs.snekbox import Snekbox
+from bot.constants import URLs
+from tests.helpers import (
+    AsyncContextManagerMock, AsyncMock, MockBot, MockContext, MockMessage, MockReaction, MockUser, async_test
+)
+
+
+class SnekboxTests(unittest.TestCase):
+    def setUp(self):
+        """Add mocked bot and cog to the instance."""
+        self.bot = MockBot()
+
+        self.mocked_post = MagicMock()
+        self.mocked_post.json = AsyncMock()
+        self.bot.http_session.post = MagicMock(return_value=AsyncContextManagerMock(self.mocked_post))
+
+        self.cog = Snekbox(bot=self.bot)
+
+    @async_test
+    async def test_post_eval(self):
+        """Post the eval code to the URLs.snekbox_eval_api endpoint."""
+        self.mocked_post.json.return_value = {'lemon': 'AI'}
+
+        self.assertEqual(await self.cog.post_eval("import random"), {'lemon': 'AI'})
+        self.bot.http_session.post.assert_called_once_with(
+            URLs.snekbox_eval_api,
+            json={"input": "import random"},
+            raise_for_status=True
+        )
+
+    @async_test
+    async def test_upload_output_reject_too_long(self):
+        """Reject output longer than MAX_PASTE_LEN."""
+        result = await self.cog.upload_output("-" * (snekbox.MAX_PASTE_LEN + 1))
+        self.assertEqual(result, "too long to upload")
+
+    @async_test
+    async def test_upload_output(self):
+        """Upload the eval output to the URLs.paste_service.format(key="documents") endpoint."""
+        key = "RainbowDash"
+        self.mocked_post.json.return_value = {"key": key}
+
+        self.assertEqual(
+            await self.cog.upload_output("My awesome output"),
+            URLs.paste_service.format(key=key)
+        )
+        self.bot.http_session.post.assert_called_once_with(
+            URLs.paste_service.format(key="documents"),
+            data="My awesome output",
+            raise_for_status=True
+        )
+
+    @async_test
+    async def test_upload_output_gracefully_fallback_if_exception_during_request(self):
+        """Output upload gracefully fallback if the upload fail."""
+        self.mocked_post.json.side_effect = Exception
+        log = logging.getLogger("bot.cogs.snekbox")
+        with self.assertLogs(logger=log, level='ERROR'):
+            await self.cog.upload_output('My awesome output!')
+
+    @async_test
+    async def test_upload_output_gracefully_fallback_if_no_key_in_response(self):
+        """Output upload gracefully fallback if there is no key entry in the response body."""
+        self.mocked_post.json.return_value = {}
+        self.assertEqual((await self.cog.upload_output('My awesome output!')), None)
+
+    def test_prepare_input(self):
+        cases = (
+            ('print("Hello world!")', 'print("Hello world!")', 'non-formatted'),
+            ('`print("Hello world!")`', 'print("Hello world!")', 'one line code block'),
+            ('```\nprint("Hello world!")```', 'print("Hello world!")', 'multiline code block'),
+            ('```py\nprint("Hello world!")```', 'print("Hello world!")', 'multiline python code block'),
+        )
+        for case, expected, testname in cases:
+            with self.subTest(msg=f'Extract code from {testname}.'):
+                self.assertEqual(self.cog.prepare_input(case), expected)
+
+    def test_get_results_message(self):
+        """Return error and message according to the eval result."""
+        cases = (
+            ('ERROR', None, ('Your eval job has failed', 'ERROR')),
+            ('', 128 + snekbox.SIGKILL, ('Your eval job timed out or ran out of memory', '')),
+            ('', 255, ('Your eval job has failed', 'A fatal NsJail error occurred'))
+        )
+        for stdout, returncode, expected in cases:
+            with self.subTest(stdout=stdout, returncode=returncode, expected=expected):
+                actual = self.cog.get_results_message({'stdout': stdout, 'returncode': returncode})
+                self.assertEqual(actual, expected)
+
+    @patch('bot.cogs.snekbox.Signals', side_effect=ValueError)
+    def test_get_results_message_invalid_signal(self, mock_Signals: Mock):
+        self.assertEqual(
+            self.cog.get_results_message({'stdout': '', 'returncode': 127}),
+            ('Your eval job has completed with return code 127', '')
+        )
+
+    @patch('bot.cogs.snekbox.Signals')
+    def test_get_results_message_valid_signal(self, mock_Signals: Mock):
+        mock_Signals.return_value.name = 'SIGTEST'
+        self.assertEqual(
+            self.cog.get_results_message({'stdout': '', 'returncode': 127}),
+            ('Your eval job has completed with return code 127 (SIGTEST)', '')
+        )
+
+    def test_get_status_emoji(self):
+        """Return emoji according to the eval result."""
+        cases = (
+            (' ', -1, ':warning:'),
+            ('Hello world!', 0, ':white_check_mark:'),
+            ('Invalid beard size', -1, ':x:')
+        )
+        for stdout, returncode, expected in cases:
+            with self.subTest(stdout=stdout, returncode=returncode, expected=expected):
+                actual = self.cog.get_status_emoji({'stdout': stdout, 'returncode': returncode})
+                self.assertEqual(actual, expected)
+
+    @async_test
+    async def test_format_output(self):
+        """Test output formatting."""
+        self.cog.upload_output = AsyncMock(return_value='https://testificate.com/')
+
+        too_many_lines = (
+            '001 | v\n002 | e\n003 | r\n004 | y\n005 | l\n006 | o\n'
+            '007 | n\n008 | g\n009 | b\n010 | e\n011 | a\n... (truncated - too many lines)'
+        )
+        too_long_too_many_lines = (
+            "\n".join(
+                f"{i:03d} | {line}" for i, line in enumerate(['verylongbeard' * 10] * 15, 1)
+            )[:1000] + "\n... (truncated - too long, too many lines)"
+        )
+
+        cases = (
+            ('', ('[No output]', None), 'No output'),
+            ('My awesome output', ('My awesome output', None), 'One line output'),
+            ('<@', ("<@\u200B", None), r'Convert <@ to <@\u200B'),
+            ('<!@', ("<!@\u200B", None), r'Convert <!@ to <!@\u200B'),
+            (
+                '\u202E\u202E\u202E',
+                ('Code block escape attempt detected; will not output result', None),
+                'Detect RIGHT-TO-LEFT OVERRIDE'
+            ),
+            (
+                '\u200B\u200B\u200B',
+                ('Code block escape attempt detected; will not output result', None),
+                'Detect ZERO WIDTH SPACE'
+            ),
+            ('long\nbeard', ('001 | long\n002 | beard', None), 'Two line output'),
+            (
+                'v\ne\nr\ny\nl\no\nn\ng\nb\ne\na\nr\nd',
+                (too_many_lines, 'https://testificate.com/'),
+                '12 lines output'
+            ),
+            (
+                'verylongbeard' * 100,
+                ('verylongbeard' * 76 + 'verylongbear\n... (truncated - too long)', 'https://testificate.com/'),
+                '1300 characters output'
+            ),
+            (
+                ('verylongbeard' * 10 + '\n') * 15,
+                (too_long_too_many_lines, 'https://testificate.com/'),
+                '15 lines, 1965 characters output'
+            ),
+        )
+        for case, expected, testname in cases:
+            with self.subTest(msg=testname, case=case, expected=expected):
+                self.assertEqual(await self.cog.format_output(case), expected)
+
+    @async_test
+    async def test_eval_command_evaluate_once(self):
+        """Test the eval command procedure."""
+        ctx = MockContext()
+        response = MockMessage()
+        self.cog.prepare_input = MagicMock(return_value='MyAwesomeFormattedCode')
+        self.cog.send_eval = AsyncMock(return_value=response)
+        self.cog.continue_eval = AsyncMock(return_value=None)
+
+        await self.cog.eval_command.callback(self.cog, ctx=ctx, code='MyAwesomeCode')
+        self.cog.prepare_input.assert_called_once_with('MyAwesomeCode')
+        self.cog.send_eval.assert_called_once_with(ctx, 'MyAwesomeFormattedCode')
+        self.cog.continue_eval.assert_called_once_with(ctx, response)
+
+    @async_test
+    async def test_eval_command_evaluate_twice(self):
+        """Test the eval and re-eval command procedure."""
+        ctx = MockContext()
+        response = MockMessage()
+        self.cog.prepare_input = MagicMock(return_value='MyAwesomeFormattedCode')
+        self.cog.send_eval = AsyncMock(return_value=response)
+        self.cog.continue_eval = AsyncMock()
+        self.cog.continue_eval.side_effect = ('MyAwesomeCode-2', None)
+
+        await self.cog.eval_command.callback(self.cog, ctx=ctx, code='MyAwesomeCode')
+        self.cog.prepare_input.has_calls(call('MyAwesomeCode'), call('MyAwesomeCode-2'))
+        self.cog.send_eval.assert_called_with(ctx, 'MyAwesomeFormattedCode')
+        self.cog.continue_eval.assert_called_with(ctx, response)
+
+    @async_test
+    async def test_eval_command_reject_two_eval_at_the_same_time(self):
+        """Test if the eval command rejects an eval if the author already have a running eval."""
+        ctx = MockContext()
+        ctx.author.id = 42
+        ctx.author.mention = '@LemonLemonishBeard#0042'
+        ctx.send = AsyncMock()
+        self.cog.jobs = (42,)
+        await self.cog.eval_command.callback(self.cog, ctx=ctx, code='MyAwesomeCode')
+        ctx.send.assert_called_once_with(
+            "@LemonLemonishBeard#0042 You've already got a job running - please wait for it to finish!"
+        )
+
+    @async_test
+    async def test_eval_command_call_help(self):
+        """Test if the eval command call the help command if no code is provided."""
+        ctx = MockContext()
+        ctx.invoke = AsyncMock()
+        await self.cog.eval_command.callback(self.cog, ctx=ctx, code='')
+        ctx.invoke.assert_called_once_with(self.bot.get_command("help"), "eval")
+
+    @async_test
+    async def test_send_eval(self):
+        """Test the send_eval function."""
+        ctx = MockContext()
+        ctx.message = MockMessage()
+        ctx.send = AsyncMock()
+        ctx.author.mention = '@LemonLemonishBeard#0042'
+        ctx.typing = MagicMock(return_value=AsyncContextManagerMock(None))
+        self.cog.post_eval = AsyncMock(return_value={'stdout': '', 'returncode': 0})
+        self.cog.get_results_message = MagicMock(return_value=('Return code 0', ''))
+        self.cog.get_status_emoji = MagicMock(return_value=':yay!:')
+        self.cog.format_output = AsyncMock(return_value=('[No output]', None))
+
+        await self.cog.send_eval(ctx, 'MyAwesomeCode')
+        ctx.send.assert_called_once_with(
+            '@LemonLemonishBeard#0042 :yay!: Return code 0.\n\n```py\n[No output]\n```'
+        )
+        self.cog.post_eval.assert_called_once_with('MyAwesomeCode')
+        self.cog.get_status_emoji.assert_called_once_with({'stdout': '', 'returncode': 0})
+        self.cog.get_results_message.assert_called_once_with({'stdout': '', 'returncode': 0})
+        self.cog.format_output.assert_called_once_with('')
+
+    @async_test
+    async def test_send_eval_with_paste_link(self):
+        """Test the send_eval function with a too long output that generate a paste link."""
+        ctx = MockContext()
+        ctx.message = MockMessage()
+        ctx.send = AsyncMock()
+        ctx.author.mention = '@LemonLemonishBeard#0042'
+        ctx.typing = MagicMock(return_value=AsyncContextManagerMock(None))
+        self.cog.post_eval = AsyncMock(return_value={'stdout': 'Way too long beard', 'returncode': 0})
+        self.cog.get_results_message = MagicMock(return_value=('Return code 0', ''))
+        self.cog.get_status_emoji = MagicMock(return_value=':yay!:')
+        self.cog.format_output = AsyncMock(return_value=('Way too long beard', 'lookatmybeard.com'))
+
+        await self.cog.send_eval(ctx, 'MyAwesomeCode')
+        ctx.send.assert_called_once_with(
+            '@LemonLemonishBeard#0042 :yay!: Return code 0.'
+            '\n\n```py\nWay too long beard\n```\nFull output: lookatmybeard.com'
+        )
+        self.cog.post_eval.assert_called_once_with('MyAwesomeCode')
+        self.cog.get_status_emoji.assert_called_once_with({'stdout': 'Way too long beard', 'returncode': 0})
+        self.cog.get_results_message.assert_called_once_with({'stdout': 'Way too long beard', 'returncode': 0})
+        self.cog.format_output.assert_called_once_with('Way too long beard')
+
+    @async_test
+    async def test_send_eval_with_non_zero_eval(self):
+        """Test the send_eval function with a code returning a non-zero code."""
+        ctx = MockContext()
+        ctx.message = MockMessage()
+        ctx.send = AsyncMock()
+        ctx.author.mention = '@LemonLemonishBeard#0042'
+        ctx.typing = MagicMock(return_value=AsyncContextManagerMock(None))
+        self.cog.post_eval = AsyncMock(return_value={'stdout': 'ERROR', 'returncode': 127})
+        self.cog.get_results_message = MagicMock(return_value=('Return code 127', 'Beard got stuck in the eval'))
+        self.cog.get_status_emoji = MagicMock(return_value=':nope!:')
+        self.cog.format_output = AsyncMock()  # This function isn't called
+
+        await self.cog.send_eval(ctx, 'MyAwesomeCode')
+        ctx.send.assert_called_once_with(
+            '@LemonLemonishBeard#0042 :nope!: Return code 127.\n\n```py\nBeard got stuck in the eval\n```'
+        )
+        self.cog.post_eval.assert_called_once_with('MyAwesomeCode')
+        self.cog.get_status_emoji.assert_called_once_with({'stdout': 'ERROR', 'returncode': 127})
+        self.cog.get_results_message.assert_called_once_with({'stdout': 'ERROR', 'returncode': 127})
+        self.cog.format_output.assert_not_called()
+
+    @async_test
+    async def test_continue_eval_does_continue(self):
+        """Test that the continue_eval function does continue if required conditions are met."""
+        ctx = MockContext(message=MockMessage(add_reaction=AsyncMock(), clear_reactions=AsyncMock()))
+        response = MockMessage(delete=AsyncMock())
+        new_msg = MockMessage(content='!e NewCode')
+        self.bot.wait_for.side_effect = ((None, new_msg), None)
+
+        actual = await self.cog.continue_eval(ctx, response)
+        self.assertEqual(actual, 'NewCode')
+        self.bot.wait_for.has_calls(
+            call('message_edit', partial(snekbox.predicate_eval_message_edit, ctx), timeout=10),
+            call('reaction_add', partial(snekbox.predicate_eval_emoji_reaction, ctx), timeout=10)
+        )
+        ctx.message.add_reaction.assert_called_once_with(snekbox.REEVAL_EMOJI)
+        ctx.message.clear_reactions.assert_called_once()
+        response.delete.assert_called_once()
+
+    @async_test
+    async def test_continue_eval_does_not_continue(self):
+        ctx = MockContext(message=MockMessage(clear_reactions=AsyncMock()))
+        self.bot.wait_for.side_effect = asyncio.TimeoutError
+
+        actual = await self.cog.continue_eval(ctx, MockMessage())
+        self.assertEqual(actual, None)
+        ctx.message.clear_reactions.assert_called_once()
+
+    def test_predicate_eval_message_edit(self):
+        """Test the predicate_eval_message_edit function."""
+        msg0 = MockMessage(id=1, content='abc')
+        msg1 = MockMessage(id=2, content='abcdef')
+        msg2 = MockMessage(id=1, content='abcdef')
+
+        cases = (
+            (msg0, msg0, False, 'same ID, same content'),
+            (msg0, msg1, False, 'different ID, different content'),
+            (msg0, msg2, True, 'same ID, different content')
+        )
+        for ctx_msg, new_msg, expected, testname in cases:
+            with self.subTest(msg=f'Messages with {testname} return {expected}'):
+                ctx = MockContext(message=ctx_msg)
+                actual = snekbox.predicate_eval_message_edit(ctx, ctx_msg, new_msg)
+                self.assertEqual(actual, expected)
+
+    def test_predicate_eval_emoji_reaction(self):
+        """Test the predicate_eval_emoji_reaction function."""
+        valid_reaction = MockReaction(message=MockMessage(id=1))
+        valid_reaction.__str__.return_value = snekbox.REEVAL_EMOJI
+        valid_ctx = MockContext(message=MockMessage(id=1), author=MockUser(id=2))
+        valid_user = MockUser(id=2)
+
+        invalid_reaction_id = MockReaction(message=MockMessage(id=42))
+        invalid_reaction_id.__str__.return_value = snekbox.REEVAL_EMOJI
+        invalid_user_id = MockUser(id=42)
+        invalid_reaction_str = MockReaction(message=MockMessage(id=1))
+        invalid_reaction_str.__str__.return_value = ':longbeard:'
+
+        cases = (
+            (invalid_reaction_id, valid_user, False, 'invalid reaction ID'),
+            (valid_reaction, invalid_user_id, False, 'invalid user ID'),
+            (invalid_reaction_str, valid_user, False, 'invalid reaction __str__'),
+            (valid_reaction, valid_user, True, 'matching attributes')
+        )
+        for reaction, user, expected, testname in cases:
+            with self.subTest(msg=f'Test with {testname} and expected return {expected}'):
+                actual = snekbox.predicate_eval_emoji_reaction(valid_ctx, reaction, user)
+                self.assertEqual(actual, expected)
+
+
+class SnekboxSetupTests(unittest.TestCase):
+    """Tests setup of the `Snekbox` cog."""
+
+    def test_setup(self):
+        """Setup of the extension should call add_cog."""
+        bot = MockBot()
+        snekbox.setup(bot)
+        bot.add_cog.assert_called_once()
diff --git a/tests/bot/test_converters.py b/tests/bot/test_converters.py
index b2b78d9dd..1e5ca62ae 100644
--- a/tests/bot/test_converters.py
+++ b/tests/bot/test_converters.py
@@ -68,7 +68,7 @@ class ConverterTests(unittest.TestCase):
             ('👋', "Don't be ridiculous, you can't use that character!"),
             ('', "Tag names should not be empty, or filled with whitespace."),
             ('  ', "Tag names should not be empty, or filled with whitespace."),
-            ('42', "Tag names can't be numbers."),
+            ('42', "Tag names must contain at least one letter."),
             ('x' * 128, "Are you insane? That's way too long!"),
         )

diff --git a/tests/bot/test_utils.py b/tests/bot/test_utils.py
index 58ae2a81a..d7bcc3ba6 100644
--- a/tests/bot/test_utils.py
+++ b/tests/bot/test_utils.py
@@ -35,18 +35,3 @@ class CaseInsensitiveDictTests(unittest.TestCase):
         instance = utils.CaseInsensitiveDict()
         instance.update({'FOO': 'bar'})
         self.assertEqual(instance['foo'], 'bar')
-
-
-class ChunkTests(unittest.TestCase):
-    """Tests the `chunk` method."""
-
-    def test_empty_chunking(self):
-        """Tests chunking on an empty iterable."""
-        generator = utils.chunks(iterable=[], size=5)
-        self.assertEqual(list(generator), [])
-
-    def test_list_chunking(self):
-        """Tests chunking a non-empty list."""
-        iterable = [1, 2, 3, 4, 5]
-        generator = utils.chunks(iterable=iterable, size=2)
-        self.assertEqual(list(generator), [[1, 2], [3, 4], [5]])
