Dataset columns:
- repo: string (856 distinct values)
- pull_number: int64 (range 3 to 127k)
- instance_id: string (length 12 to 58)
- issue_numbers: sequence (length 1 to 5)
- base_commit: string (length 40)
- patch: string (length 67 to 1.54M)
- test_patch: string (length 0 to 107M)
- problem_statement: string (length 3 to 307k)
- hints_text: string (length 0 to 908k)
- created_at: timestamp[s]
microsoft/botbuilder-python
1,312
microsoft__botbuilder-python-1312
[ "1300" ]
dc9d4f00d2214ee855c04b996fcd44fb268be03e
diff --git a/libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py b/libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py --- a/libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py +++ b/libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py @@ -279,10 +279,18 @@ async def continue_conversation( context.turn_state[BotAdapter.BOT_CALLBACK_HANDLER_KEY] = callback context.turn_state[BotAdapter.BOT_OAUTH_SCOPE_KEY] = audience - # Add the channel service URL to the trusted services list so we can send messages back. - # the service URL for skills is trusted because it is applied by the SkillHandler based - # on the original request received by the root bot - AppCredentials.trust_service_url(reference.service_url) + # If we receive a valid app id in the incoming token claims, add the channel service URL to the + # trusted services list so we can send messages back. + # The service URL for skills is trusted because it is applied by the SkillHandler based on the original + # request received by the root bot + app_id_from_claims = JwtTokenValidation.get_app_id_from_claims( + claims_identity.claims + ) + if app_id_from_claims: + if SkillValidation.is_skill_claim( + claims_identity.claims + ) or await self._credential_provider.is_valid_appid(app_id_from_claims): + AppCredentials.trust_service_url(reference.service_url) client = await self.create_connector_client( reference.service_url, claims_identity, audience
diff --git a/libraries/botbuilder-core/tests/test_bot_framework_adapter.py b/libraries/botbuilder-core/tests/test_bot_framework_adapter.py --- a/libraries/botbuilder-core/tests/test_bot_framework_adapter.py +++ b/libraries/botbuilder-core/tests/test_bot_framework_adapter.py @@ -571,8 +571,14 @@ async def callback(context: TurnContext): scope = context.turn_state[BotFrameworkAdapter.BOT_OAUTH_SCOPE_KEY] assert AuthenticationConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE == scope + # Ensure the serviceUrl was added to the trusted hosts + assert AppCredentials.is_trusted_service(channel_service_url) + refs = ConversationReference(service_url=channel_service_url) + # Ensure the serviceUrl is NOT in the trusted hosts + assert not AppCredentials.is_trusted_service(channel_service_url) + await adapter.continue_conversation( refs, callback, claims_identity=skills_identity ) @@ -629,8 +635,14 @@ async def callback(context: TurnContext): scope = context.turn_state[BotFrameworkAdapter.BOT_OAUTH_SCOPE_KEY] assert skill_2_app_id == scope + # Ensure the serviceUrl was added to the trusted hosts + assert AppCredentials.is_trusted_service(skill_2_service_url) + refs = ConversationReference(service_url=skill_2_service_url) + # Ensure the serviceUrl is NOT in the trusted hosts + assert not AppCredentials.is_trusted_service(skill_2_service_url) + await adapter.continue_conversation( refs, callback, claims_identity=skills_identity, audience=skill_2_app_id )
[PORT] Fixes Unauthorized error when calling ContinueConversation

> Port this change from botbuilder-dotnet/master branch: https://github.com/microsoft/botbuilder-dotnet/pull/4348

Fixes #4347

Fixes the Unauthorized error when ContinueConversation is called on a bot app service instance that didn't originate the skill request.

- Updated code in BotFrameworkAdapter.ContinueConversation() so it always adds the activity.ServiceUrl to TrustedHostNames if the request has been authorized and it has an app ID (this works for skills and other proactive messages).
- Made TrustedHostNames in AppCredentials internal and added an InternalsVisibleTo directive for Microsoft.Bot.Connector so we can access it and assert its state in tests.
- Cleaned up some async warnings in BotFrameworkAdapterTests.
- Added additional logging to AppCredentials to help troubleshoot this issue in the future.

# Changed projects

* Microsoft.Bot.Builder
* Microsoft.Bot.Connector
* Microsoft.Bot.Builder.Tests
Hi @axelsrz, can you validate if the related PR applies to Python?
2020-08-04T13:52:41
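For orientation alongside the PR above, a minimal sketch of the proactive `continue_conversation` path that the patch hardens. This is an illustration under stated assumptions: the app id, password, and conversation reference are placeholders, not values from the repository.

```python
# Illustrative sketch only: the proactive-messaging path touched by the patch above.
# "app-id"/"app-password" and the stored ConversationReference are placeholders; real bots
# capture the reference from an earlier incoming activity via
# TurnContext.get_conversation_reference(activity).
from botbuilder.core import BotFrameworkAdapter, BotFrameworkAdapterSettings, TurnContext
from botbuilder.schema import ConversationReference

adapter = BotFrameworkAdapter(BotFrameworkAdapterSettings("app-id", "app-password"))

async def send_proactive(turn_context: TurnContext):
    await turn_context.send_activity("Proactive hello")

async def resume(reference: ConversationReference):
    # With the fix above, reference.service_url is only trusted when the incoming claims
    # carry a valid app id (skill claims or an app id known to the credential provider).
    await adapter.continue_conversation(reference, send_proactive, bot_id="app-id")
```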
microsoft/botbuilder-python
1,326
microsoft__botbuilder-python-1326
[ "1322" ]
e3eedd429fc70bf5ffb2cf43be76b49acc6f8fb6
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py @@ -4,6 +4,7 @@ from copy import deepcopy from typing import List +from botframework.connector.token_api.models import TokenExchangeRequest from botbuilder.schema import ( Activity, ActivityTypes, @@ -22,13 +23,16 @@ DialogReason, DialogInstance, ) -from botframework.connector.token_api.models import TokenExchangeRequest from .begin_skill_dialog_options import BeginSkillDialogOptions from .skill_dialog_options import SkillDialogOptions class SkillDialog(Dialog): + SKILLCONVERSATIONIDSTATEKEY = ( + "Microsoft.Bot.Builder.Dialogs.SkillDialog.SkillConversationId" + ) + def __init__(self, dialog_options: SkillDialogOptions, dialog_id: str): super().__init__(dialog_id) if not dialog_options: @@ -65,8 +69,18 @@ async def begin_dialog(self, dialog_context: DialogContext, options: object = No self._deliver_mode_state_key ] = dialog_args.activity.delivery_mode + # Create the conversationId and store it in the dialog context state so we can use it later + skill_conversation_id = await self._create_skill_conversation_id( + dialog_context.context, dialog_context.context.activity + ) + dialog_context.active_dialog.state[ + SkillDialog.SKILLCONVERSATIONIDSTATEKEY + ] = skill_conversation_id + # Send the activity to the skill. - eoc_activity = await self._send_to_skill(dialog_context.context, skill_activity) + eoc_activity = await self._send_to_skill( + dialog_context.context, skill_activity, skill_conversation_id + ) if eoc_activity: return await dialog_context.end_dialog(eoc_activity.value) @@ -101,7 +115,12 @@ async def continue_dialog(self, dialog_context: DialogContext): ] # Just forward to the remote skill - eoc_activity = await self._send_to_skill(dialog_context.context, skill_activity) + skill_conversation_id = dialog_context.active_dialog.state[ + SkillDialog.SKILLCONVERSATIONIDSTATEKEY + ] + eoc_activity = await self._send_to_skill( + dialog_context.context, skill_activity, skill_conversation_id + ) if eoc_activity: return await dialog_context.end_dialog(eoc_activity.value) @@ -123,7 +142,8 @@ async def reprompt_dialog( # pylint: disable=unused-argument ) # connection Name is not applicable for a RePrompt, as we don't expect as OAuthCard in response. - await self._send_to_skill(context, reprompt_event) + skill_conversation_id = instance.state[SkillDialog.SKILLCONVERSATIONIDSTATEKEY] + await self._send_to_skill(context, reprompt_event, skill_conversation_id) async def resume_dialog( # pylint: disable=unused-argument self, dialog_context: "DialogContext", reason: DialogReason, result: object @@ -152,7 +172,10 @@ async def end_dialog( activity.additional_properties = context.activity.additional_properties # connection Name is not applicable for an EndDialog, as we don't expect as OAuthCard in response. 
- await self._send_to_skill(context, activity) + skill_conversation_id = instance.state[ + SkillDialog.SKILLCONVERSATIONIDSTATEKEY + ] + await self._send_to_skill(context, activity, skill_conversation_id) await super().end_dialog(context, instance, reason) @@ -187,7 +210,7 @@ def _on_validate_activity( return True async def _send_to_skill( - self, context: TurnContext, activity: Activity + self, context: TurnContext, activity: Activity, skill_conversation_id: str ) -> Activity: if activity.type == ActivityTypes.invoke: # Force ExpectReplies for invoke activities so we can get the replies right away and send @@ -195,10 +218,6 @@ async def _send_to_skill( # response from the skill and any other activities sent, including EoC. activity.delivery_mode = DeliveryModes.expect_replies - skill_conversation_id = await self._create_skill_conversation_id( - context, activity - ) - # Always save state before forwarding # (the dialog stack won't get updated with the skillDialog and things won't work if you don't) await self.dialog_options.conversation_state.save_changes(context, True)
diff --git a/libraries/botbuilder-dialogs/tests/test_skill_dialog.py b/libraries/botbuilder-dialogs/tests/test_skill_dialog.py --- a/libraries/botbuilder-dialogs/tests/test_skill_dialog.py +++ b/libraries/botbuilder-dialogs/tests/test_skill_dialog.py @@ -6,6 +6,7 @@ from unittest.mock import Mock import aiounittest +from botframework.connector.token_api.models import TokenExchangeResource from botbuilder.core import ( ConversationState, MemoryStorage, @@ -40,7 +41,6 @@ BeginSkillDialogOptions, DialogTurnStatus, ) -from botframework.connector.token_api.models import TokenExchangeResource class SimpleConversationIdFactory(ConversationIdFactoryBase): @@ -148,10 +148,13 @@ async def capture( conversation_state=conversation_state, ) + assert len(dialog_options.conversation_id_factory.conversation_refs) == 0 + # Send something to the dialog to start it await client.send_activity(MessageFactory.text("irrelevant")) # Assert results and data sent to the SkillClient for fist turn + assert len(dialog_options.conversation_id_factory.conversation_refs) == 1 assert dialog_options.bot_id == from_bot_id_sent assert dialog_options.skill.app_id == to_bot_id_sent assert dialog_options.skill.skill_endpoint == to_url_sent @@ -162,6 +165,7 @@ async def capture( await client.send_activity(MessageFactory.text("Second message")) # Assert results for second turn + assert len(dialog_options.conversation_id_factory.conversation_refs) == 1 assert activity_sent.text == "Second message" assert DialogTurnStatus.Waiting == client.dialog_turn_result.status
[PORT] Refactored SkillDialog to call ConversationFactory.CreateConversationId only once

> Port this change from botbuilder-dotnet/master branch: https://github.com/microsoft/botbuilder-dotnet/pull/4382

Refactored SkillDialog to call the factory only during begin and store the generated conversationId in state so it can be used in subsequent calls to the skill. Needs parity for Python and JS.

Fixes #4374

# Changed projects

* Microsoft.Bot.Builder.Dialogs
* Microsoft.Bot.Builder.Dialogs.Tests
2020-08-06T14:19:39
microsoft/botbuilder-python
1,387
microsoft__botbuilder-python-1387
[ "1159" ]
3bfdc9fdfb55fd83b266d003aaa4d7f0eff7ed5a
diff --git a/libraries/botbuilder-ai/botbuilder/ai/luis/luis_recognizer.py b/libraries/botbuilder-ai/botbuilder/ai/luis/luis_recognizer.py --- a/libraries/botbuilder-ai/botbuilder/ai/luis/luis_recognizer.py +++ b/libraries/botbuilder-ai/botbuilder/ai/luis/luis_recognizer.py @@ -272,7 +272,7 @@ async def _recognize_internal( if not utterance or utterance.isspace(): recognizer_result = RecognizerResult( - text=utterance, intents={"": IntentScore(score=1.0)}, entities={} + text=utterance ) else:
[PORT] Fix LuisRecognizer to not return intents if there is no utterance

> Port this change from botbuilder-dotnet/master branch: https://github.com/microsoft/botbuilder-dotnet/pull/4129

Fixes #4117

## Description
LuisRecognizer returns an intent of string.empty with a perfect score of 1.0. This messes up logic downstream in RecognizerSet because it thinks it has a perfect intent.

## Specific Changes
* Changed LuisRecognizer to return an empty RecognizerResult (no intents or entities) when there is no utterance.

## Testing
Changed RegexRecognizer to behave just like LuisRecognizer so that the ValueRecognizer tests are a valid check.

# Changed projects

* Microsoft.Bot.Builder.AI.LUIS
* Microsoft.Bot.Builder.Dialogs.Adaptive
* Microsoft.Bot.Builder.AI.LUIS.Tests
2020-09-20T19:20:57
microsoft/botbuilder-python
1,395
microsoft__botbuilder-python-1395
[ "1373" ]
96299630faaf05b807fae2106f484c7371362e1a
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py @@ -249,6 +249,11 @@ async def _send_to_skill( if from_skill_activity.type == ActivityTypes.end_of_conversation: # Capture the EndOfConversation activity if it was sent from skill eoc_activity = from_skill_activity + + # The conversation has ended, so cleanup the conversation id + await self.dialog_options.conversation_id_factory.delete_conversation_reference( + skill_conversation_id + ) elif await self._intercept_oauth_cards( context, from_skill_activity, self.dialog_options.connection_name ):
diff --git a/libraries/botbuilder-dialogs/tests/test_skill_dialog.py b/libraries/botbuilder-dialogs/tests/test_skill_dialog.py --- a/libraries/botbuilder-dialogs/tests/test_skill_dialog.py +++ b/libraries/botbuilder-dialogs/tests/test_skill_dialog.py @@ -2,7 +2,7 @@ # Licensed under the MIT License. import uuid from http import HTTPStatus -from typing import Callable, Union +from typing import Callable, Union, List from unittest.mock import Mock import aiounittest @@ -46,6 +46,7 @@ class SimpleConversationIdFactory(ConversationIdFactoryBase): def __init__(self): self.conversation_refs = {} + self.create_count = 0 async def create_skill_conversation_id( self, @@ -53,6 +54,7 @@ async def create_skill_conversation_id( SkillConversationIdFactoryOptions, ConversationReference ], ) -> str: + self.create_count += 1 key = ( options_or_conversation_reference.activity.conversation.id + options_or_conversation_reference.activity.service_url @@ -72,7 +74,8 @@ async def get_conversation_reference( return self.conversation_refs[skill_conversation_id] async def delete_conversation_reference(self, skill_conversation_id: str): - raise NotImplementedError() + self.conversation_refs.pop(skill_conversation_id, None) + return class SkillDialogTests(aiounittest.AsyncTestCase): @@ -506,6 +509,57 @@ async def post_return(): self.assertIsNotNone(final_activity) self.assertEqual(len(final_activity.attachments), 1) + async def test_end_of_conversation_from_expect_replies_calls_delete_conversation_reference( + self, + ): + activity_sent: Activity = None + + # Callback to capture the parameters sent to the skill + async def capture_action( + from_bot_id: str, # pylint: disable=unused-argument + to_bot_id: str, # pylint: disable=unused-argument + to_uri: str, # pylint: disable=unused-argument + service_url: str, # pylint: disable=unused-argument + conversation_id: str, # pylint: disable=unused-argument + activity: Activity, + ): + # Capture values sent to the skill so we can assert the right parameters were used. + nonlocal activity_sent + activity_sent = activity + + eoc = Activity.create_end_of_conversation_activity() + expected_replies = list([eoc]) + + # Create a mock skill client to intercept calls and capture what is sent. + mock_skill_client = self._create_mock_skill_client( + capture_action, expected_replies=expected_replies + ) + + # Use Memory for conversation state + conversation_state = ConversationState(MemoryStorage()) + dialog_options = self.create_skill_dialog_options( + conversation_state, mock_skill_client + ) + + # Create the SkillDialogInstance and the activity to send. 
+ sut = SkillDialog(dialog_options, dialog_id="dialog") + activity_to_send = Activity.create_message_activity() + activity_to_send.delivery_mode = DeliveryModes.expect_replies + activity_to_send.text = str(uuid.uuid4()) + client = DialogTestClient( + "test", + sut, + BeginSkillDialogOptions(activity_to_send), + conversation_state=conversation_state, + ) + + # Send something to the dialog to start it + await client.send_activity("hello") + + simple_id_factory: SimpleConversationIdFactory = dialog_options.conversation_id_factory + self.assertEqual(0, len(simple_id_factory.conversation_refs)) + self.assertEqual(1, simple_id_factory.create_count) + @staticmethod def create_skill_dialog_options( conversation_state: ConversationState, @@ -547,9 +601,15 @@ def create_oauth_card_attachment_activity(uri: str) -> Activity: return attachment_activity def _create_mock_skill_client( - self, callback: Callable, return_status: Union[Callable, int] = 200 + self, + callback: Callable, + return_status: Union[Callable, int] = 200, + expected_replies: List[Activity] = None, ) -> BotFrameworkClient: mock_client = Mock() + activity_list = ExpectedReplies( + activities=expected_replies or [MessageFactory.text("dummy activity")] + ) async def mock_post_activity( from_bot_id: str, @@ -572,7 +632,7 @@ async def mock_post_activity( if isinstance(return_status, Callable): return await return_status() - return InvokeResponse(status=return_status) + return InvokeResponse(status=return_status, body=activity_list) mock_client.post_activity.side_effect = mock_post_activity
SkillDialog doesn't call SkillConversationIdFactory.DeleteConversationReference when using ExpectReplies

See [parent](https://github.com/microsoft/botframework-sdk/issues/6019)
2020-10-01T22:57:30
microsoft/botbuilder-python
1,399
microsoft__botbuilder-python-1399
[ "1388" ]
58078999ca0ea8c7f53d8eb21c6ab2e7f9da51ce
diff --git a/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py b/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py --- a/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py +++ b/libraries/botbuilder-core/botbuilder/core/show_typing_middleware.py @@ -4,7 +4,9 @@ from typing import Awaitable, Callable from botbuilder.schema import Activity, ActivityTypes +from botframework.connector.auth import ClaimsIdentity, SkillValidation +from .bot_adapter import BotAdapter from .middleware_set import Middleware from .turn_context import TurnContext @@ -82,9 +84,12 @@ async def aux(): def stop_interval(): timer.set_clear_timer() - # if it's a message, start sending typing activities until the - # bot logic is done. - if context.activity.type == ActivityTypes.message: + # Start a timer to periodically send the typing activity + # (bots running as skills should not send typing activity) + if ( + context.activity.type == ActivityTypes.message + and not ShowTypingMiddleware._is_skill_bot(context) + ): start_interval(context, self._delay, self._period) # call the bot logic @@ -93,3 +98,10 @@ def stop_interval(): stop_interval() return result + + @staticmethod + def _is_skill_bot(context: TurnContext) -> bool: + claims_identity = context.turn_state.get(BotAdapter.BOT_IDENTITY_KEY) + return isinstance( + claims_identity, ClaimsIdentity + ) and SkillValidation.is_skill_claim(claims_identity.claims)
diff --git a/libraries/botbuilder-core/botbuilder/core/adapters/test_adapter.py b/libraries/botbuilder-core/botbuilder/core/adapters/test_adapter.py --- a/libraries/botbuilder-core/botbuilder/core/adapters/test_adapter.py +++ b/libraries/botbuilder-core/botbuilder/core/adapters/test_adapter.py @@ -153,7 +153,7 @@ async def process_activity( self._conversation_lock.release() activity.timestamp = activity.timestamp or datetime.utcnow() - await self.run_pipeline(TurnContext(self, activity), logic) + await self.run_pipeline(self.create_turn_context(activity), logic) async def send_activities( self, context, activities: List[Activity] @@ -227,7 +227,7 @@ async def create_conversation( members_removed=[], conversation=ConversationAccount(id=str(uuid.uuid4())), ) - context = TurnContext(self, update) + context = self.create_turn_context(update) return await callback(context) async def receive_activity(self, activity): @@ -252,7 +252,7 @@ async def receive_activity(self, activity): request.id = str(self._next_id) # Create context object and run middleware. - context = TurnContext(self, request) + context = self.create_turn_context(request) return await self.run_pipeline(context, self.logic) def get_next_activity(self) -> Activity: @@ -534,6 +534,9 @@ async def exchange_token_from_credentials( return None + def create_turn_context(self, activity: Activity) -> TurnContext: + return TurnContext(self, activity) + class TestFlow: __test__ = False diff --git a/libraries/botbuilder-core/tests/test_show_typing_middleware.py b/libraries/botbuilder-core/tests/test_show_typing_middleware.py --- a/libraries/botbuilder-core/tests/test_show_typing_middleware.py +++ b/libraries/botbuilder-core/tests/test_show_typing_middleware.py @@ -1,11 +1,31 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import asyncio +from uuid import uuid4 import aiounittest -from botbuilder.core import ShowTypingMiddleware +from botbuilder.core import ShowTypingMiddleware, TurnContext from botbuilder.core.adapters import TestAdapter -from botbuilder.schema import ActivityTypes +from botbuilder.schema import Activity, ActivityTypes +from botframework.connector.auth import AuthenticationConstants, ClaimsIdentity + + +class SkillTestAdapter(TestAdapter): + def create_turn_context(self, activity: Activity) -> TurnContext: + turn_context = super().create_turn_context(activity) + + claims_identity = ClaimsIdentity( + claims={ + AuthenticationConstants.VERSION_CLAIM: "2.0", + AuthenticationConstants.AUDIENCE_CLAIM: str(uuid4()), + AuthenticationConstants.AUTHORIZED_PARTY: str(uuid4()), + }, + is_authenticated=True, + ) + + turn_context.turn_state[self.BOT_IDENTITY_KEY] = claims_identity + + return turn_context class TestShowTypingMiddleware(aiounittest.AsyncTestCase): @@ -65,3 +85,14 @@ def assert_is_message(activity, description): # pylint: disable=unused-argument step1 = await adapter.send("foo") await step1.assert_reply(assert_is_message) + + async def test_not_send_not_send_typing_indicator_when_bot_running_as_skill(self): + async def aux(context): + await asyncio.sleep(1) + await context.send_activity(f"echo:{context.activity.text}") + + skill_adapter = SkillTestAdapter(aux) + skill_adapter.use(ShowTypingMiddleware(0.001, 1)) + + step1 = await skill_adapter.send("foo") + await step1.assert_reply("echo:foo")
Avoid sending typing activity when bot is invoked as skill

We should port this once the C# PR is merged. See [parent](https://github.com/microsoft/botframework-sdk/issues/6049)
2020-10-05T21:17:30
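A hedged usage sketch for the middleware changed above; the adapter credentials and timing values are placeholders.

```python
# Attach ShowTypingMiddleware to an adapter. With the patch above, the typing indicator is
# skipped automatically when the incoming identity carries skill claims.
from botbuilder.core import (
    BotFrameworkAdapter,
    BotFrameworkAdapterSettings,
    ShowTypingMiddleware,
)

adapter = BotFrameworkAdapter(BotFrameworkAdapterSettings("app-id", "app-password"))
adapter.use(ShowTypingMiddleware(delay=0.5, period=2.0))  # delay/period are illustrative
```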
microsoft/botbuilder-python
1,401
microsoft__botbuilder-python-1401
[ "822" ]
58078999ca0ea8c7f53d8eb21c6ab2e7f9da51ce
diff --git a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py --- a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py +++ b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py @@ -50,7 +50,7 @@ async def post_activity_to_skill( originating_audience = ( GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE if self._channel_provider is not None - and self._channel_provider.IsGovernment() + and self._channel_provider.is_government() else AuthenticationConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE )
diff --git a/libraries/botbuilder-integration-aiohttp/tests/skills/test_skill_http_client.py b/libraries/botbuilder-integration-aiohttp/tests/skills/test_skill_http_client.py new file mode 100644 --- /dev/null +++ b/libraries/botbuilder-integration-aiohttp/tests/skills/test_skill_http_client.py @@ -0,0 +1,205 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from uuid import uuid4 +from typing import Awaitable, Callable, Dict, Union + + +from unittest.mock import Mock +import aiounittest + +from botbuilder.core import MessageFactory, InvokeResponse +from botbuilder.core.skills import ( + BotFrameworkSkill, + ConversationIdFactoryBase, + SkillConversationIdFactoryOptions, + SkillConversationReference, +) +from botbuilder.integration.aiohttp.skills import SkillHttpClient +from botbuilder.schema import Activity, ConversationAccount, ConversationReference +from botframework.connector.auth import ( + AuthenticationConstants, + ChannelProvider, + GovernmentConstants, +) + + +class SimpleConversationIdFactory(ConversationIdFactoryBase): + def __init__(self, conversation_id: str): + self._conversation_id = conversation_id + self._conversation_refs: Dict[str, SkillConversationReference] = {} + # Public property to capture and assert the options passed to CreateSkillConversationIdAsync. + self.creation_options: SkillConversationIdFactoryOptions = None + + async def create_skill_conversation_id( + self, + options_or_conversation_reference: Union[ + SkillConversationIdFactoryOptions, ConversationReference + ], + ) -> str: + self.creation_options = options_or_conversation_reference + + key = self._conversation_id + self._conversation_refs[key] = self._conversation_refs.get( + key, + SkillConversationReference( + conversation_reference=options_or_conversation_reference.activity.get_conversation_reference(), + oauth_scope=options_or_conversation_reference.from_bot_oauth_scope, + ), + ) + return key + + async def get_conversation_reference( + self, skill_conversation_id: str + ) -> SkillConversationReference: + return self._conversation_refs[skill_conversation_id] + + async def delete_conversation_reference(self, skill_conversation_id: str): + raise NotImplementedError() + + +class TestSkillHttpClientTests(aiounittest.AsyncTestCase): + async def test_post_activity_with_originating_audience(self): + conversation_id = str(uuid4()) + conversation_id_factory = SimpleConversationIdFactory(conversation_id) + test_activity = MessageFactory.text("some message") + test_activity.conversation = ConversationAccount() + skill = BotFrameworkSkill( + id="SomeSkill", + app_id="", + skill_endpoint="https://someskill.com/api/messages", + ) + + async def _mock_post_content( + to_url: str, + token: str, # pylint: disable=unused-argument + activity: Activity, + ) -> (int, object): + nonlocal self + self.assertEqual(skill.skill_endpoint, to_url) + # Assert that the activity being sent has what we expect. + self.assertEqual(conversation_id, activity.conversation.id) + self.assertEqual("https://parentbot.com/api/messages", activity.service_url) + + # Create mock response. 
+ return 200, None + + sut = await self._create_http_client_with_mock_handler( + _mock_post_content, conversation_id_factory + ) + + result = await sut.post_activity_to_skill( + "", + skill, + "https://parentbot.com/api/messages", + test_activity, + "someOriginatingAudience", + ) + + # Assert factory options + self.assertEqual("", conversation_id_factory.creation_options.from_bot_id) + self.assertEqual( + "someOriginatingAudience", + conversation_id_factory.creation_options.from_bot_oauth_scope, + ) + self.assertEqual( + test_activity, conversation_id_factory.creation_options.activity + ) + self.assertEqual( + skill, conversation_id_factory.creation_options.bot_framework_skill + ) + + # Assert result + self.assertIsInstance(result, InvokeResponse) + self.assertEqual(200, result.status) + + async def test_post_activity_using_invoke_response(self): + for is_gov in [True, False]: + with self.subTest(is_government=is_gov): + # pylint: disable=undefined-variable + # pylint: disable=cell-var-from-loop + conversation_id = str(uuid4()) + conversation_id_factory = SimpleConversationIdFactory(conversation_id) + test_activity = MessageFactory.text("some message") + test_activity.conversation = ConversationAccount() + expected_oauth_scope = ( + AuthenticationConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE + ) + mock_channel_provider: ChannelProvider = Mock(spec=ChannelProvider) + + def is_government_mock(): + nonlocal expected_oauth_scope + if is_government: + expected_oauth_scope = ( + GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE + ) + + return is_government + + mock_channel_provider.is_government = Mock( + side_effect=is_government_mock + ) + + skill = BotFrameworkSkill( + id="SomeSkill", + app_id="", + skill_endpoint="https://someskill.com/api/messages", + ) + + async def _mock_post_content( + to_url: str, + token: str, # pylint: disable=unused-argument + activity: Activity, + ) -> (int, object): + nonlocal self + + self.assertEqual(skill.skill_endpoint, to_url) + # Assert that the activity being sent has what we expect. + self.assertEqual(conversation_id, activity.conversation.id) + self.assertEqual( + "https://parentbot.com/api/messages", activity.service_url + ) + + # Create mock response. + return 200, None + + sut = await self._create_http_client_with_mock_handler( + _mock_post_content, conversation_id_factory + ) + result = await sut.post_activity_to_skill( + "", skill, "https://parentbot.com/api/messages", test_activity + ) + + # Assert factory options + self.assertEqual( + "", conversation_id_factory.creation_options.from_bot_id + ) + self.assertEqual( + expected_oauth_scope, + conversation_id_factory.creation_options.from_bot_oauth_scope, + ) + self.assertEqual( + test_activity, conversation_id_factory.creation_options.activity + ) + self.assertEqual( + skill, conversation_id_factory.creation_options.bot_framework_skill + ) + + # Assert result + self.assertIsInstance(result, InvokeResponse) + self.assertEqual(200, result.status) + + # Helper to create an HttpClient with a mock message handler that executes function argument to validate the request + # and mock a response. + async def _create_http_client_with_mock_handler( + self, + value_function: Callable[[object], Awaitable[object]], + id_factory: ConversationIdFactoryBase, + channel_provider: ChannelProvider = None, + ) -> SkillHttpClient: + # pylint: disable=protected-access + client = SkillHttpClient(Mock(), id_factory, channel_provider) + client._post_content = value_function + await client._session.close() + + return client
Add tests for SkillHttpClient

See the dotnet and JavaScript implementations. [enhancement]
2020-10-07T03:04:50
microsoft/botbuilder-python
1,402
microsoft__botbuilder-python-1402
[ "1228" ]
b9678d5693aa6a2b804d3571c08447ab3b9aac7c
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_extensions.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_extensions.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_extensions.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_extensions.py @@ -1,22 +1,21 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. +from botframework.connector.auth import ( + ClaimsIdentity, + SkillValidation, + AuthenticationConstants, + GovernmentConstants, +) from botbuilder.core import BotAdapter, StatePropertyAccessor, TurnContext from botbuilder.core.skills import SkillHandler, SkillConversationReference - from botbuilder.dialogs import ( Dialog, DialogEvents, DialogSet, DialogTurnStatus, ) -from botbuilder.schema import Activity, ActivityTypes -from botframework.connector.auth import ( - ClaimsIdentity, - SkillValidation, - AuthenticationConstants, - GovernmentConstants, -) +from botbuilder.schema import Activity, ActivityTypes, EndOfConversationCodes class DialogExtensions: @@ -87,6 +86,9 @@ async def run_dialog( type=ActivityTypes.end_of_conversation, value=result.result, locale=turn_context.activity.locale, + code=EndOfConversationCodes.completed_successfully + if result.status == DialogTurnStatus.Complete + else EndOfConversationCodes.user_cancelled, ) await turn_context.send_activity(activity)
diff --git a/libraries/botbuilder-dialogs/tests/test_dialogextensions.py b/libraries/botbuilder-dialogs/tests/test_dialogextensions.py --- a/libraries/botbuilder-dialogs/tests/test_dialogextensions.py +++ b/libraries/botbuilder-dialogs/tests/test_dialogextensions.py @@ -7,6 +7,7 @@ import aiounittest +from botframework.connector.auth import ClaimsIdentity, AuthenticationConstants from botbuilder.core import ( TurnContext, MessageFactory, @@ -28,8 +29,7 @@ TranscriptLoggerMiddleware, ConsoleTranscriptLogger, ) -from botbuilder.schema import ActivityTypes, Activity -from botframework.connector.auth import ClaimsIdentity, AuthenticationConstants +from botbuilder.schema import ActivityTypes, Activity, EndOfConversationCodes from botbuilder.dialogs import ( ComponentDialog, TextPrompt, @@ -111,6 +111,7 @@ async def handles_bot_and_skills_test_cases( self.eoc_sent, "Skills should send EndConversation to channel" ) assert ActivityTypes.end_of_conversation == self.eoc_sent.type + assert EndOfConversationCodes.completed_successfully == self.eoc_sent.code assert self.eoc_sent.value == "SomeName" else: self.assertIsNone(
[PORT] Add EndOfConversationCodes to EndOfConversation activity from Skill

> Port this change from botbuilder-dotnet/master branch: https://github.com/microsoft/botbuilder-dotnet/pull/4235

Fixes https://github.com/microsoft/botframework-sdk/issues/5852

# Changed projects

* Microsoft.Bot.Builder.Dialogs
* Microsoft.Bot.Builder.Dialogs.Tests
2020-10-07T15:40:38
microsoft/botbuilder-python
1,403
microsoft__botbuilder-python-1403
[ "1394" ]
a30301b5d46d25bbd377b91097db915bed6c16f0
diff --git a/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_extensions.py b/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_extensions.py --- a/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_extensions.py +++ b/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_extensions.py @@ -27,7 +27,9 @@ def teams_get_team_info(activity: Activity) -> TeamInfo: return None -def teams_notify_user(activity: Activity): +def teams_notify_user( + activity: Activity, alert_in_meeting: bool = None, external_resource_url: str = None +): if not activity: return @@ -36,4 +38,6 @@ def teams_notify_user(activity: Activity): channel_data = TeamsChannelData().deserialize(activity.channel_data) channel_data.notification = NotificationInfo(alert=True) + channel_data.notification.alert_in_meeting = alert_in_meeting + channel_data.notification.external_resource_url = external_resource_url activity.channel_data = channel_data diff --git a/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py b/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py --- a/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py +++ b/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py @@ -940,11 +940,22 @@ class NotificationInfo(Model): _attribute_map = { "alert": {"key": "alert", "type": "bool"}, + "alert_in_meeting": {"key": "alertInMeeting", "type": "bool"}, + "external_resource_url": {"key": "externalResourceUrl", "type": "str"}, } - def __init__(self, *, alert: bool = None, **kwargs) -> None: + def __init__( + self, + *, + alert: bool = None, + alert_in_meeting: bool = None, + external_resource_url: str = None, + **kwargs + ) -> None: super(NotificationInfo, self).__init__(**kwargs) self.alert = alert + self.alert_in_meeting = alert_in_meeting + self.external_resource_url = external_resource_url class O365ConnectorCard(Model):
Meeting notification alert bubble (Python)

See [parent](https://github.com/microsoft/botframework-sdk/issues/6026)
2020-10-07T19:39:27
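A hypothetical call to the extended helper from the patch above; the activity text and external resource URL are made-up values.

```python
# Hypothetical usage of teams_notify_user with the new parameters added by the patch above.
from botbuilder.core.teams import teams_notify_user
from botbuilder.schema import Activity, ActivityTypes

activity = Activity(type=ActivityTypes.message, text="Your meeting task is ready")
teams_notify_user(
    activity,
    alert_in_meeting=True,  # raise the in-meeting notification bubble
    external_resource_url="https://example.com/meeting-content",  # placeholder URL
)
```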
microsoft/botbuilder-python
1,413
microsoft__botbuilder-python-1413
[ "920" ]
30bce7885e944e0a27c6b40dbe830259ded67f97
diff --git a/libraries/botbuilder-core/botbuilder/core/skills/skill_handler.py b/libraries/botbuilder-core/botbuilder/core/skills/skill_handler.py --- a/libraries/botbuilder-core/botbuilder/core/skills/skill_handler.py +++ b/libraries/botbuilder-core/botbuilder/core/skills/skill_handler.py @@ -62,9 +62,9 @@ async def on_send_to_conversation( This method allows you to send an activity to the end of a conversation. This is slightly different from ReplyToActivity(). - * SendToConversation(conversationId) - will append the activity to the end + * SendToConversation(conversation_id) - will append the activity to the end of the conversation according to the timestamp or semantics of the channel. - * ReplyToActivity(conversationId,ActivityId) - adds the activity as a reply + * ReplyToActivity(conversation_id,ActivityId) - adds the activity as a reply to another activity, if the channel supports it. If the channel does not support nested replies, ReplyToActivity falls back to SendToConversation. @@ -97,9 +97,9 @@ async def on_reply_to_activity( This method allows you to reply to an activity. This is slightly different from SendToConversation(). - * SendToConversation(conversationId) - will append the activity to the end + * SendToConversation(conversation_id) - will append the activity to the end of the conversation according to the timestamp or semantics of the channel. - * ReplyToActivity(conversationId,ActivityId) - adds the activity as a reply + * ReplyToActivity(conversation_id,ActivityId) - adds the activity as a reply to another activity, if the channel supports it. If the channel does not support nested replies, ReplyToActivity falls back to SendToConversation. @@ -111,6 +111,8 @@ async def on_reply_to_activity( :type claims_identity: :class:`botframework.connector.auth.ClaimsIdentity` :param conversation_id:The conversation ID. :type conversation_id: str + :param activity_id: Activity ID to send. + :type activity_id: str :param activity: Activity to send. 
:type activity: Activity :return: @@ -119,13 +121,66 @@ async def on_reply_to_activity( claims_identity, conversation_id, activity_id, activity, ) - async def _process_activity( + async def on_delete_activity( + self, claims_identity: ClaimsIdentity, conversation_id: str, activity_id: str + ): + skill_conversation_reference = await self._get_skill_conversation_reference( + conversation_id + ) + + async def callback(turn_context: TurnContext): + turn_context.turn_state[ + self.SKILL_CONVERSATION_REFERENCE_KEY + ] = skill_conversation_reference + await turn_context.delete_activity(activity_id) + + await self._adapter.continue_conversation( + skill_conversation_reference.conversation_reference, + callback, + claims_identity=claims_identity, + audience=skill_conversation_reference.oauth_scope, + ) + + async def on_update_activity( self, claims_identity: ClaimsIdentity, conversation_id: str, - reply_to_activity_id: str, + activity_id: str, activity: Activity, ) -> ResourceResponse: + skill_conversation_reference = await self._get_skill_conversation_reference( + conversation_id + ) + + resource_response: ResourceResponse = None + + async def callback(turn_context: TurnContext): + nonlocal resource_response + turn_context.turn_state[ + self.SKILL_CONVERSATION_REFERENCE_KEY + ] = skill_conversation_reference + activity.apply_conversation_reference( + skill_conversation_reference.conversation_reference + ) + turn_context.activity.id = activity_id + turn_context.activity.caller_id = ( + f"{CallerIdConstants.bot_to_bot_prefix}" + f"{JwtTokenValidation.get_app_id_from_claims(claims_identity.claims)}" + ) + resource_response = await turn_context.update_activity(activity) + + await self._adapter.continue_conversation( + skill_conversation_reference.conversation_reference, + callback, + claims_identity=claims_identity, + audience=skill_conversation_reference.oauth_scope, + ) + + return resource_response or ResourceResponse(id=str(uuid4()).replace("-", "")) + + async def _get_skill_conversation_reference( + self, conversation_id: str + ) -> SkillConversationReference: # Get the SkillsConversationReference conversation_reference_result = await self._conversation_id_factory.get_conversation_reference( conversation_id @@ -135,11 +190,10 @@ async def _process_activity( # or a ConversationReference (the old way, but still here for compatibility). If a # ConversationReference is returned, build a new SkillConversationReference to simplify # the remainder of this method. 
- skill_conversation_reference: SkillConversationReference = None if isinstance(conversation_reference_result, SkillConversationReference): - skill_conversation_reference = conversation_reference_result + skill_conversation_reference: SkillConversationReference = conversation_reference_result else: - skill_conversation_reference = SkillConversationReference( + skill_conversation_reference: SkillConversationReference = SkillConversationReference( conversation_reference=conversation_reference_result, oauth_scope=( GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE @@ -154,6 +208,19 @@ async def _process_activity( if not skill_conversation_reference.conversation_reference: raise KeyError("conversationReference not found") + return skill_conversation_reference + + async def _process_activity( + self, + claims_identity: ClaimsIdentity, + conversation_id: str, + reply_to_activity_id: str, + activity: Activity, + ) -> ResourceResponse: + skill_conversation_reference = await self._get_skill_conversation_reference( + conversation_id + ) + # If an activity is sent, return the ResourceResponse resource_response: ResourceResponse = None
diff --git a/libraries/botbuilder-core/tests/skills/test_skill_handler.py b/libraries/botbuilder-core/tests/skills/test_skill_handler.py --- a/libraries/botbuilder-core/tests/skills/test_skill_handler.py +++ b/libraries/botbuilder-core/tests/skills/test_skill_handler.py @@ -1,5 +1,6 @@ import hashlib import json +from datetime import datetime from uuid import uuid4 from asyncio import Future from typing import Dict, List, Callable @@ -204,6 +205,8 @@ async def test_on_send_to_conversation(self): self._conversation_id = await self._test_id_factory.create_skill_conversation_id( self._conversation_reference ) + # python 3.7 doesn't support AsyncMock, change this when min ver is 3.8 + send_activities_called = False mock_adapter = Mock() @@ -214,36 +217,55 @@ async def continue_conversation( claims_identity: ClaimsIdentity = None, audience: str = None, ): # pylint: disable=unused-argument - await callback( - TurnContext( - mock_adapter, - conversation_reference_extension.get_continuation_activity( - self._conversation_reference - ), - ) + # Invoke the callback created by the handler so we can assert the rest of the execution. + turn_context = TurnContext( + mock_adapter, + conversation_reference_extension.get_continuation_activity( + self._conversation_reference + ), ) + await callback(turn_context) + + # Assert the callback set the right properties. + assert ( + f"{CallerIdConstants.bot_to_bot_prefix}{self.skill_id}" + ), turn_context.activity.caller_id async def send_activities( context: TurnContext, activities: List[Activity] ): # pylint: disable=unused-argument + # Messages should not have a caller id set when sent back to the caller. + nonlocal send_activities_called + assert activities[0].caller_id is None + assert activities[0].reply_to_id is None + send_activities_called = True return [ResourceResponse(id="resourceId")] mock_adapter.continue_conversation = continue_conversation mock_adapter.send_activities = send_activities - sut = self.create_skill_handler_for_testing(mock_adapter) - - activity = Activity(type=ActivityTypes.message, attachments=[], entities=[]) - TurnContext.apply_conversation_reference(activity, self._conversation_reference) - - assert not activity.caller_id + types_to_test = [ + ActivityTypes.end_of_conversation, + ActivityTypes.event, + ActivityTypes.message, + ] + + for activity_type in types_to_test: + with self.subTest(act_type=activity_type): + send_activities_called = False + activity = Activity(type=activity_type, attachments=[], entities=[]) + TurnContext.apply_conversation_reference( + activity, self._conversation_reference + ) + sut = self.create_skill_handler_for_testing(mock_adapter) - resource_response = await sut.test_on_send_to_conversation( - self._claims_identity, self._conversation_id, activity - ) + resource_response = await sut.test_on_send_to_conversation( + self._claims_identity, self._conversation_id, activity + ) - assert activity.caller_id is None - assert resource_response.id == "resourceId" + if activity_type == ActivityTypes.message: + assert send_activities_called + assert resource_response.id == "resourceId" async def test_forwarding_on_send_to_conversation(self): self._conversation_id = await self._test_id_factory.create_skill_conversation_id( @@ -282,69 +304,186 @@ async def side_effect( assert response.id is resource_response_id async def test_on_reply_to_activity(self): + resource_response_id = "resourceId" self._conversation_id = await self._test_id_factory.create_skill_conversation_id( self._conversation_reference ) - mock_adapter 
= Mock() - mock_adapter.continue_conversation = MagicMock(return_value=Future()) - mock_adapter.continue_conversation.return_value.set_result(Mock()) - mock_adapter.send_activities = MagicMock(return_value=Future()) - mock_adapter.send_activities.return_value.set_result([]) + types_to_test = [ + ActivityTypes.end_of_conversation, + ActivityTypes.event, + ActivityTypes.message, + ] + + for activity_type in types_to_test: + with self.subTest(act_type=activity_type): + mock_adapter = Mock() + mock_adapter.continue_conversation = MagicMock(return_value=Future()) + mock_adapter.continue_conversation.return_value.set_result(Mock()) + mock_adapter.send_activities = MagicMock(return_value=Future()) + mock_adapter.send_activities.return_value.set_result( + [ResourceResponse(id=resource_response_id)] + ) - sut = self.create_skill_handler_for_testing(mock_adapter) + sut = self.create_skill_handler_for_testing(mock_adapter) - activity = Activity(type=ActivityTypes.message, attachments=[], entities=[]) - activity_id = str(uuid4()) - TurnContext.apply_conversation_reference(activity, self._conversation_reference) + activity = Activity(type=activity_type, attachments=[], entities=[]) + activity_id = str(uuid4()) + TurnContext.apply_conversation_reference( + activity, self._conversation_reference + ) - await sut.test_on_reply_to_activity( - self._claims_identity, self._conversation_id, activity_id, activity - ) + resource_response = await sut.test_on_reply_to_activity( + self._claims_identity, self._conversation_id, activity_id, activity + ) - args, kwargs = mock_adapter.continue_conversation.call_args_list[0] + # continue_conversation validation + ( + args_continue, + kwargs_continue, + ) = mock_adapter.continue_conversation.call_args_list[0] + mock_adapter.continue_conversation.assert_called_once() - assert isinstance(args[0], ConversationReference) - assert callable(args[1]) - assert isinstance(kwargs["claims_identity"], ClaimsIdentity) + assert isinstance(args_continue[0], ConversationReference) + assert callable(args_continue[1]) + assert isinstance(kwargs_continue["claims_identity"], ClaimsIdentity) + + turn_context = TurnContext( + mock_adapter, + conversation_reference_extension.get_continuation_activity( + self._conversation_reference + ), + ) + await args_continue[1](turn_context) + # assert the callback set the right properties. + assert ( + f"{CallerIdConstants.bot_to_bot_prefix}{self.skill_id}" + ), turn_context.activity.caller_id + + if activity_type == ActivityTypes.message: + # send_activities validation + (args_send, _,) = mock_adapter.send_activities.call_args_list[0] + activity_from_send = args_send[1][0] + assert activity_from_send.caller_id is None + assert activity_from_send.reply_to_id, activity_id + assert resource_response.id, resource_response_id + else: + # Assert mock SendActivitiesAsync wasn't called + mock_adapter.send_activities.assert_not_called() + + async def test_on_update_activity(self): + self._conversation_id = await self._test_id_factory.create_skill_conversation_id( + self._conversation_reference + ) + resource_response_id = "resourceId" + called_continue = False + called_update = False - await args[1]( - TurnContext( + mock_adapter = Mock() + activity = Activity(type=ActivityTypes.message, attachments=[], entities=[]) + activity_id = str(uuid4()) + message = activity.text = f"TestUpdate {datetime.now()}." 
+ + async def continue_conversation( + reference: ConversationReference, + callback: Callable, + bot_id: str = None, + claims_identity: ClaimsIdentity = None, + audience: str = None, + ): # pylint: disable=unused-argument + # Invoke the callback created by the handler so we can assert the rest of the execution. + nonlocal called_continue + turn_context = TurnContext( mock_adapter, conversation_reference_extension.get_continuation_activity( self._conversation_reference ), ) + await callback(turn_context) + + # Assert the callback set the right properties. + assert ( + f"{CallerIdConstants.bot_to_bot_prefix}{self.skill_id}" + ), turn_context.activity.caller_id + called_continue = True + + async def update_activity( + context: TurnContext, # pylint: disable=unused-argument + new_activity: Activity, + ) -> ResourceResponse: + # Assert the activity being sent. + nonlocal called_update + assert activity_id, new_activity.reply_to_id + assert message, new_activity.text + called_update = True + + return ResourceResponse(id=resource_response_id) + + mock_adapter.continue_conversation = continue_conversation + mock_adapter.update_activity = update_activity + + sut = self.create_skill_handler_for_testing(mock_adapter) + resource_response = await sut.test_on_update_activity( + self._claims_identity, self._conversation_id, activity_id, activity ) - assert activity.caller_id is None - async def test_on_update_activity(self): - self._conversation_id = "" + assert called_continue + assert called_update + assert resource_response, resource_response_id - mock_adapter = Mock() + async def test_on_delete_activity(self): + self._conversation_id = await self._test_id_factory.create_skill_conversation_id( + self._conversation_reference + ) - sut = self.create_skill_handler_for_testing(mock_adapter) + resource_response_id = "resourceId" + called_continue = False + called_delete = False - activity = Activity(type=ActivityTypes.message, attachments=[], entities=[]) + mock_adapter = Mock() activity_id = str(uuid4()) - with self.assertRaises(BotActionNotImplementedError): - await sut.test_on_update_activity( - self._claims_identity, self._conversation_id, activity_id, activity + async def continue_conversation( + reference: ConversationReference, + callback: Callable, + bot_id: str = None, + claims_identity: ClaimsIdentity = None, + audience: str = None, + ): # pylint: disable=unused-argument + # Invoke the callback created by the handler so we can assert the rest of the execution. + nonlocal called_continue + turn_context = TurnContext( + mock_adapter, + conversation_reference_extension.get_continuation_activity( + self._conversation_reference + ), ) + await callback(turn_context) + called_continue = True - async def test_on_delete_activity(self): - self._conversation_id = "" + async def delete_activity( + context: TurnContext, # pylint: disable=unused-argument + conversation_reference: ConversationReference, + ) -> ResourceResponse: + # Assert the activity being sent. + nonlocal called_delete + # Assert the activity_id being deleted. 
+ assert activity_id, conversation_reference.activity_id + called_delete = True - mock_adapter = Mock() + return ResourceResponse(id=resource_response_id) + + mock_adapter.continue_conversation = continue_conversation + mock_adapter.delete_activity = delete_activity sut = self.create_skill_handler_for_testing(mock_adapter) - activity_id = str(uuid4()) - with self.assertRaises(BotActionNotImplementedError): - await sut.test_on_delete_activity( - self._claims_identity, self._conversation_id, activity_id - ) + await sut.test_on_delete_activity( + self._claims_identity, self._conversation_id, activity_id + ) + + assert called_continue + assert called_delete async def test_on_get_activity_members(self): self._conversation_id = ""
I can Update and Delete activities from a Skill (Python)

See [parent](https://github.com/microsoft/botframework-sdk/issues/5788)
Hi @axelsrz, the dotnet PR for this one has been merged into master and can be ported to Python whenever you are ready.
2020-10-20T00:38:39
microsoft/botbuilder-python
1,417
microsoft__botbuilder-python-1417
[ "1408" ]
ae2a56e513c4a911b3a293c86f65e51d20d19c39
diff --git a/libraries/botbuilder-schema/botbuilder/schema/teams/__init__.py b/libraries/botbuilder-schema/botbuilder/schema/teams/__init__.py --- a/libraries/botbuilder-schema/botbuilder/schema/teams/__init__.py +++ b/libraries/botbuilder-schema/botbuilder/schema/teams/__init__.py @@ -61,6 +61,7 @@ from ._models_py3 import TeamsMeetingInfo from ._models_py3 import TeamsMeetingParticipant from ._models_py3 import MeetingParticipantInfo +from ._models_py3 import CacheInfo __all__ = [ "AppBasedLinkQuery", @@ -123,4 +124,5 @@ "TeamsMeetingInfo", "TeamsMeetingParticipant", "MeetingParticipantInfo", + "CacheInfo", ] diff --git a/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py b/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py --- a/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py +++ b/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py @@ -71,6 +71,28 @@ def __init__(self, *, id: str = None, name: str = None, **kwargs) -> None: self.name = name +class CacheInfo(Model): + """A cache info object which notifies Teams how long an object should be cached for. + + :param cache_type: Type of Cache Info + :type cache_type: str + :param cache_duration: Duration of the Cached Info. + :type cache_duration: int + """ + + _attribute_map = { + "cache_type": {"key": "cacheType", "type": "str"}, + "cache_duration": {"key": "cacheDuration", "type": "int"}, + } + + def __init__( + self, *, cache_type: str = None, cache_duration: int = None, **kwargs + ) -> None: + super(CacheInfo, self).__init__(**kwargs) + self.cache_type = cache_type + self.cache_duration = cache_duration + + class ConversationList(Model): """List of channels under a team. @@ -699,6 +721,8 @@ class MessagingExtensionActionResponse(Model): :param compose_extension: :type compose_extension: ~botframework.connector.teams.models.MessagingExtensionResult + :param cache_info: CacheInfo for this MessagingExtensionActionResponse. + :type cache_info: ~botframework.connector.teams.models.CacheInfo """ _attribute_map = { @@ -707,12 +731,21 @@ class MessagingExtensionActionResponse(Model): "key": "composeExtension", "type": "MessagingExtensionResult", }, + "cache_info": {"key": "cacheInfo", "type": "CacheInfo"}, } - def __init__(self, *, task=None, compose_extension=None, **kwargs) -> None: + def __init__( + self, + *, + task=None, + compose_extension=None, + cache_info: CacheInfo = None, + **kwargs + ) -> None: super(MessagingExtensionActionResponse, self).__init__(**kwargs) self.task = task self.compose_extension = compose_extension + self.cache_info = cache_info class MessagingExtensionAttachment(Attachment): @@ -849,8 +882,9 @@ class MessagingExtensionResponse(Model): """Messaging extension response. :param compose_extension: - :type compose_extension: - ~botframework.connector.teams.models.MessagingExtensionResult + :type compose_extension: ~botframework.connector.teams.models.MessagingExtensionResult + :param cache_info: CacheInfo for this MessagingExtensionResponse. 
+ :type cache_info: ~botframework.connector.teams.models.CacheInfo """ _attribute_map = { @@ -858,11 +892,13 @@ class MessagingExtensionResponse(Model): "key": "composeExtension", "type": "MessagingExtensionResult", }, + "cache_info": {"key": "cacheInfo", "type": CacheInfo}, } - def __init__(self, *, compose_extension=None, **kwargs) -> None: + def __init__(self, *, compose_extension=None, cache_info=None, **kwargs) -> None: super(MessagingExtensionResponse, self).__init__(**kwargs) self.compose_extension = compose_extension + self.cache_info = cache_info class MessagingExtensionResult(Model): @@ -1671,15 +1707,19 @@ class TaskModuleResponse(Model): :param task: The JSON for the Adaptive card to appear in the task module. :type task: ~botframework.connector.teams.models.TaskModuleResponseBase + :param cache_info: CacheInfo for this TaskModuleResponse. + :type cache_info: ~botframework.connector.teams.models.CacheInfo """ _attribute_map = { "task": {"key": "task", "type": "TaskModuleResponseBase"}, + "cache_info": {"key": "cacheInfo", "type": "CacheInfo"}, } - def __init__(self, *, task=None, **kwargs) -> None: + def __init__(self, *, task=None, cache_info=None, **kwargs) -> None: super(TaskModuleResponse, self).__init__(**kwargs) self.task = task + self.cache_info = cache_info class TaskModuleTaskInfo(Model):
Add support for CacheInfo in Bot invoke response (Python)

See [parent](https://github.com/microsoft/botframework-sdk/issues/6006)
Spoke with @tracyboehrer and this should land today or tomorrow.
2020-10-21T19:09:55
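A hypothetical invoke response showing the new CacheInfo field in use; the message text and cache values are placeholders, not taken from the patch.

```python
# Sketch of a task module response that asks Teams to cache the result, using the
# CacheInfo model introduced by the patch above.
from botbuilder.schema.teams import CacheInfo, TaskModuleMessageResponse, TaskModuleResponse

response = TaskModuleResponse(
    task=TaskModuleMessageResponse(value="Task completed"),
    cache_info=CacheInfo(cache_type="cache", cache_duration=60),  # cache for 60 seconds
)
```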
microsoft/botbuilder-python
1,418
microsoft__botbuilder-python-1418
[ "1405" ]
80438ef1d512db331d665edfa94d2035026b8927
diff --git a/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py b/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py --- a/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py +++ b/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py @@ -529,7 +529,7 @@ async def on_conversation_update_activity(self, turn_context: TurnContext): channel_data.channel, channel_data.team, turn_context ) if channel_data.event_type == "teamRenamed": - return await self.on_teams_team_renamed_activity( + return await self.on_teams_team_renamed( channel_data.team, turn_context ) if channel_data.event_type == "teamRestored": @@ -600,10 +600,27 @@ async def on_teams_team_hard_deleted( # pylint: disable=unused-argument """ return + async def on_teams_team_renamed( # pylint: disable=unused-argument + self, team_info: TeamInfo, turn_context: TurnContext + ): + """ + Invoked when a Team Renamed event activity is received from the connector. + Team Renamed correspond to the user renaming an existing team. + + :param team_info: The team info object representing the team. + :param turn_context: A context object for this turn. + + :returns: A task that represents the work queued to execute. + """ + return await self.on_teams_team_renamed_activity(team_info, turn_context) + async def on_teams_team_renamed_activity( # pylint: disable=unused-argument self, team_info: TeamInfo, turn_context: TurnContext ): """ + DEPRECATED. Please use on_teams_team_renamed(). This method will remain in place throughout + v4 so as not to break existing bots. + Invoked when a Team Renamed event activity is received from the connector. Team Renamed correspond to the user renaming an existing team.
Teams added, deleted and renamed API quite confusing?
## Version
4.10.1

## Describe the bug
While testing the application, on_teams_members_added and on_teams_members_removed are working fine, but on_teams_team_renamed is not working; instead we have to use the on_teams_team_renamed_activity API.

**Note**: channel added, renamed, and deleted events are working fine.

## Expected behavior
The on_teams_team_renamed API should work.
@rvinothrajendran There is no internal method called `on_teams_team_renamed()`. Please only override the methods found in the [`teams_activity_handler`](https://github.com/microsoft/botbuilder-python/blob/main/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py). In this case, you want to stick with [`on_teams_team_renamed_activity()`](https://github.com/microsoft/botbuilder-python/blob/main/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py#L603). You can also see we have this in [the docs, here](https://docs.microsoft.com/en-us/azure/bot-service/bot-builder-basics-teams?view=azure-bot-service-4.0&tabs=python#teams-bot-logic).

Thanks @mdrichardson My searching went wrong 😊 The API names follow the same convention:
1. on_teams_members_added
2. on_teams_members_removed
3. on_teams_channel_created
4. on_teams_channel_renamed
5. on_teams_channel_deleted

but the on_teams_team_renamed_activity API name is different; the suffix "**activity**" has been added. Is there any specific reason for this?

@rvinothrajendran I see what you mean. @axelsrz @tracyboehrer I think the naming of `on_teams_team_renamed_activity` may have been a mistake. Would it make sense to add an overload named `on_teams_team_renamed` (because Python SDK is GA, right?), or should we leave this as-is?
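With the patch above, on_conversation_update_activity dispatches to the new on_teams_team_renamed, which in turn delegates to on_teams_team_renamed_activity, so overriding either method works. For reference, a minimal sketch of the override (the bot class name and reply text are illustrative assumptions):

```python
from botbuilder.core import TurnContext
from botbuilder.core.teams import TeamsActivityHandler
from botbuilder.schema.teams import TeamInfo


class RenameAwareBot(TeamsActivityHandler):
    async def on_teams_team_renamed_activity(
        self, team_info: TeamInfo, turn_context: TurnContext
    ):
        # Fires when the team hosting the bot is renamed.
        await turn_context.send_activity(
            f"The team is now called '{team_info.name}'."
        )
```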
2020-10-22T16:31:58
microsoft/botbuilder-python
1,427
microsoft__botbuilder-python-1427
[ "1423" ]
23315efbbef9858e95a99fa3b64666fe5c9209fe
diff --git a/libraries/botbuilder-core/botbuilder/core/turn_context.py b/libraries/botbuilder-core/botbuilder/core/turn_context.py --- a/libraries/botbuilder-core/botbuilder/core/turn_context.py +++ b/libraries/botbuilder-core/botbuilder/core/turn_context.py @@ -18,6 +18,10 @@ class TurnContext: + + # Same constant as in the BF Adapter, duplicating here to avoid circular dependency + _INVOKE_RESPONSE_KEY = "BotFrameworkAdapter.InvokeResponse" + def __init__(self, adapter_or_context, request: Activity = None): """ Creates a new TurnContext instance. @@ -202,6 +206,11 @@ async def logic(): responses = [] for activity in output: self.buffered_reply_activities.append(activity) + # Ensure the TurnState has the InvokeResponseKey, since this activity + # is not being sent through the adapter, where it would be added to TurnState. + if activity.type == ActivityTypes.invoke_response: + self.turn_state[TurnContext._INVOKE_RESPONSE_KEY] = activity + responses.append(ResourceResponse()) if sent_non_trace_activity: diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py @@ -244,6 +244,8 @@ async def _send_to_skill( # Process replies in the response.Body. response.body: List[Activity] response.body = ExpectedReplies().deserialize(response.body).activities + # Track sent invoke responses, so more than one is not sent. + sent_invoke_response = False for from_skill_activity in response.body: if from_skill_activity.type == ActivityTypes.end_of_conversation: @@ -254,12 +256,18 @@ async def _send_to_skill( await self.dialog_options.conversation_id_factory.delete_conversation_reference( skill_conversation_id ) - elif await self._intercept_oauth_cards( + elif not sent_invoke_response and await self._intercept_oauth_cards( context, from_skill_activity, self.dialog_options.connection_name ): - # do nothing. Token exchange succeeded, so no oauthcard needs to be shown to the user - pass + # Token exchange succeeded, so no oauthcard needs to be shown to the user + sent_invoke_response = True else: + # If an invoke response has already been sent we should ignore future invoke responses as this + # represents a bug in the skill. + if from_skill_activity.type == ActivityTypes.invoke_response: + if sent_invoke_response: + continue + sent_invoke_response = True # Send the response back to the channel. await context.send_activity(from_skill_activity)
Port: Fix issues with ExpectReplies and Invoke in SkillDialog
While adding BufferedReplies, activities of type InvokeResponse should be added to TurnState:

```cs
if (activity.Type == ActivityTypesEx.InvokeResponse)
{
    TurnState.Add(BotFrameworkAdapter.InvokeResponseKey, activity);
}
```

https://github.com/microsoft/botbuilder-dotnet/pull/4845

And, on the SkillDialog side, only one InvokeResponse should be sent.
Hi @axelsrz, would you be able to address this one and fix it for the next R11 rc? Thanks
2020-10-28T17:49:07
microsoft/botbuilder-python
1,431
microsoft__botbuilder-python-1431
[ "670" ]
8493dc8860a0aebc51571a329a3eafaa185c8474
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/waterfall_dialog.py @@ -164,7 +164,7 @@ def get_step_name(self, index: int) -> str: """ step_name = self._steps[index].__qualname__ - if not step_name or ">" in step_name: + if not step_name or step_name.endswith("<lambda>"): step_name = f"Step{index + 1}of{len(self._steps)}" return step_name
diff --git a/libraries/botbuilder-applicationinsights/tests/test_telemetry_waterfall.py b/libraries/botbuilder-applicationinsights/tests/test_telemetry_waterfall.py --- a/libraries/botbuilder-applicationinsights/tests/test_telemetry_waterfall.py +++ b/libraries/botbuilder-applicationinsights/tests/test_telemetry_waterfall.py @@ -1,7 +1,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. -from unittest.mock import MagicMock +from unittest.mock import create_autospec, MagicMock from typing import Dict import aiounittest from botbuilder.core.adapters import TestAdapter, TestFlow @@ -14,6 +14,8 @@ ) from botbuilder.dialogs import ( Dialog, + DialogInstance, + DialogReason, DialogSet, WaterfallDialog, DialogTurnResult, @@ -83,11 +85,10 @@ async def exec_test(turn_context: TurnContext) -> None: await tf4.assert_reply("ending WaterfallDialog.") # assert - telemetry_calls = [ ("WaterfallStart", {"DialogId": "test"}), - ("WaterfallStep", {"DialogId": "test", "StepName": "Step1of2"}), - ("WaterfallStep", {"DialogId": "test", "StepName": "Step2of2"}), + ("WaterfallStep", {"DialogId": "test", "StepName": step1.__qualname__}), + ("WaterfallStep", {"DialogId": "test", "StepName": step2.__qualname__}), ] self.assert_telemetry_calls(telemetry, telemetry_calls) @@ -138,15 +139,49 @@ async def exec_test(turn_context: TurnContext) -> None: # assert telemetry_calls = [ ("WaterfallStart", {"DialogId": "test"}), - ("WaterfallStep", {"DialogId": "test", "StepName": "Step1of2"}), - ("WaterfallStep", {"DialogId": "test", "StepName": "Step2of2"}), + ("WaterfallStep", {"DialogId": "test", "StepName": step1.__qualname__}), + ("WaterfallStep", {"DialogId": "test", "StepName": step2.__qualname__}), ("WaterfallComplete", {"DialogId": "test"}), ("WaterfallStart", {"DialogId": "test"}), - ("WaterfallStep", {"DialogId": "test", "StepName": "Step1of2"}), + ("WaterfallStep", {"DialogId": "test", "StepName": step1.__qualname__}), ] - print(str(telemetry.track_event.call_args_list)) self.assert_telemetry_calls(telemetry, telemetry_calls) + async def test_cancelling_waterfall_telemetry(self): + # Arrange + dialog_id = "waterfall" + index = 0 + guid = "(guid)" + + async def my_waterfall_step(step) -> DialogTurnResult: + await step.context.send_activity("step1 response") + return Dialog.end_of_turn + + dialog = WaterfallDialog(dialog_id, [my_waterfall_step]) + + telemetry_client = create_autospec(NullTelemetryClient) + dialog.telemetry_client = telemetry_client + + dialog_instance = DialogInstance() + dialog_instance.id = dialog_id + dialog_instance.state = {"instanceId": guid, "stepIndex": index} + + # Act + await dialog.end_dialog( + TurnContext(TestAdapter(), Activity()), + dialog_instance, + DialogReason.CancelCalled, + ) + + # Assert + telemetry_props = telemetry_client.track_event.call_args_list[0][0][1] + + self.assertEqual(3, len(telemetry_props)) + self.assertEqual(dialog_id, telemetry_props["DialogId"]) + self.assertEqual(my_waterfall_step.__qualname__, telemetry_props["StepName"]) + self.assertEqual(guid, telemetry_props["InstanceId"]) + telemetry_client.track_event.assert_called_once() + def assert_telemetry_call( self, telemetry_mock, index: int, event_name: str, props: Dict[str, str] ) -> None:
[PORT] Create test for waterfall cancellation telemetry > Port this change from botbuilder-dotnet/master branch: https://github.com/microsoft/botbuilder-dotnet/pull/3314 For https://github.com/microsoft/botbuilder-js/issues/1619 # Changed projects * Microsoft.Bot.Builder.Dialogs.Tests
@tomlm - Should I take care of this?
2020-11-18T23:47:30
microsoft/botbuilder-python
1,433
microsoft__botbuilder-python-1433
[ "1218" ]
d00c0e11544538c2b8f1f48caa2ec238c9c31202
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_context.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_context.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_context.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_context.py @@ -69,26 +69,30 @@ async def begin_dialog(self, dialog_id: str, options: object = None): :param dialog_id: ID of the dialog to start :param options: (Optional) additional argument(s) to pass to the dialog being started. """ - if not dialog_id: - raise TypeError("Dialog(): dialogId cannot be None.") - # Look up dialog - dialog = await self.find_dialog(dialog_id) - if dialog is None: - raise Exception( - "'DialogContext.begin_dialog(): A dialog with an id of '%s' wasn't found." - " The dialog must be included in the current or parent DialogSet." - " For example, if subclassing a ComponentDialog you can call add_dialog() within your constructor." - % dialog_id - ) - # Push new instance onto stack - instance = DialogInstance() - instance.id = dialog_id - instance.state = {} - - self._stack.insert(0, (instance)) - - # Call dialog's begin_dialog() method - return await dialog.begin_dialog(self, options) + try: + if not dialog_id: + raise TypeError("Dialog(): dialogId cannot be None.") + # Look up dialog + dialog = await self.find_dialog(dialog_id) + if dialog is None: + raise Exception( + "'DialogContext.begin_dialog(): A dialog with an id of '%s' wasn't found." + " The dialog must be included in the current or parent DialogSet." + " For example, if subclassing a ComponentDialog you can call add_dialog() within your constructor." + % dialog_id + ) + # Push new instance onto stack + instance = DialogInstance() + instance.id = dialog_id + instance.state = {} + + self._stack.insert(0, (instance)) + + # Call dialog's begin_dialog() method + return await dialog.begin_dialog(self, options) + except Exception as err: + self.__set_exception_context_data(err) + raise # TODO: Fix options: PromptOptions instead of object async def prompt(self, dialog_id: str, options) -> DialogTurnResult: @@ -99,13 +103,17 @@ async def prompt(self, dialog_id: str, options) -> DialogTurnResult: :param options: Contains a Prompt, potentially a RetryPrompt and if using ChoicePrompt, Choices. :return: """ - if not dialog_id: - raise TypeError("DialogContext.prompt(): dialogId cannot be None.") + try: + if not dialog_id: + raise TypeError("DialogContext.prompt(): dialogId cannot be None.") - if not options: - raise TypeError("DialogContext.prompt(): options cannot be None.") + if not options: + raise TypeError("DialogContext.prompt(): options cannot be None.") - return await self.begin_dialog(dialog_id, options) + return await self.begin_dialog(dialog_id, options) + except Exception as err: + self.__set_exception_context_data(err) + raise async def continue_dialog(self): """ @@ -114,20 +122,25 @@ async def continue_dialog(self): to determine if a dialog was run and a reply was sent to the user. :return: """ - # Check for a dialog on the stack - if self.active_dialog is not None: - # Look up dialog - dialog = await self.find_dialog(self.active_dialog.id) - if not dialog: - raise Exception( - "DialogContext.continue_dialog(): Can't continue dialog. A dialog with an id of '%s' wasn't found." 
- % self.active_dialog.id - ) - - # Continue execution of dialog - return await dialog.continue_dialog(self) - - return DialogTurnResult(DialogTurnStatus.Empty) + try: + # Check for a dialog on the stack + if self.active_dialog is not None: + # Look up dialog + dialog = await self.find_dialog(self.active_dialog.id) + if not dialog: + raise Exception( + "DialogContext.continue_dialog(): Can't continue dialog. " + "A dialog with an id of '%s' wasn't found." + % self.active_dialog.id + ) + + # Continue execution of dialog + return await dialog.continue_dialog(self) + + return DialogTurnResult(DialogTurnStatus.Empty) + except Exception as err: + self.__set_exception_context_data(err) + raise # TODO: instance is DialogInstance async def end_dialog(self, result: object = None): @@ -142,22 +155,27 @@ async def end_dialog(self, result: object = None): :param result: (Optional) result to pass to the parent dialogs. :return: """ - await self.end_active_dialog(DialogReason.EndCalled) - - # Resume previous dialog - if self.active_dialog is not None: - # Look up dialog - dialog = await self.find_dialog(self.active_dialog.id) - if not dialog: - raise Exception( - "DialogContext.EndDialogAsync(): Can't resume previous dialog." - " A dialog with an id of '%s' wasn't found." % self.active_dialog.id - ) - - # Return result to previous dialog - return await dialog.resume_dialog(self, DialogReason.EndCalled, result) - - return DialogTurnResult(DialogTurnStatus.Complete, result) + try: + await self.end_active_dialog(DialogReason.EndCalled) + + # Resume previous dialog + if self.active_dialog is not None: + # Look up dialog + dialog = await self.find_dialog(self.active_dialog.id) + if not dialog: + raise Exception( + "DialogContext.EndDialogAsync(): Can't resume previous dialog." + " A dialog with an id of '%s' wasn't found." + % self.active_dialog.id + ) + + # Return result to previous dialog + return await dialog.resume_dialog(self, DialogReason.EndCalled, result) + + return DialogTurnResult(DialogTurnStatus.Complete, result) + except Exception as err: + self.__set_exception_context_data(err) + raise async def cancel_all_dialogs(self): """ @@ -165,12 +183,16 @@ async def cancel_all_dialogs(self): :param result: (Optional) result to pass to the parent dialogs. :return: """ - if self.stack: - while self.stack: - await self.end_active_dialog(DialogReason.CancelCalled) - return DialogTurnResult(DialogTurnStatus.Cancelled) + try: + if self.stack: + while self.stack: + await self.end_active_dialog(DialogReason.CancelCalled) + return DialogTurnResult(DialogTurnStatus.Cancelled) - return DialogTurnResult(DialogTurnStatus.Empty) + return DialogTurnResult(DialogTurnStatus.Empty) + except Exception as err: + self.__set_exception_context_data(err) + raise async def find_dialog(self, dialog_id: str) -> Dialog: """ @@ -179,11 +201,15 @@ async def find_dialog(self, dialog_id: str) -> Dialog: :param dialog_id: ID of the dialog to search for. 
:return: """ - dialog = await self.dialogs.find(dialog_id) + try: + dialog = await self.dialogs.find(dialog_id) - if dialog is None and self.parent is not None: - dialog = await self.parent.find_dialog(dialog_id) - return dialog + if dialog is None and self.parent is not None: + dialog = await self.parent.find_dialog(dialog_id) + return dialog + except Exception as err: + self.__set_exception_context_data(err) + raise async def replace_dialog( self, dialog_id: str, options: object = None @@ -195,29 +221,37 @@ async def replace_dialog( :param options: (Optional) additional argument(s) to pass to the new dialog. :return: """ - # End the current dialog and giving the reason. - await self.end_active_dialog(DialogReason.ReplaceCalled) + try: + # End the current dialog and giving the reason. + await self.end_active_dialog(DialogReason.ReplaceCalled) - # Start replacement dialog - return await self.begin_dialog(dialog_id, options) + # Start replacement dialog + return await self.begin_dialog(dialog_id, options) + except Exception as err: + self.__set_exception_context_data(err) + raise async def reprompt_dialog(self): """ Calls reprompt on the currently active dialog, if there is one. Used with Prompts that have a reprompt behavior. :return: """ - # Check for a dialog on the stack - if self.active_dialog is not None: - # Look up dialog - dialog = await self.find_dialog(self.active_dialog.id) - if not dialog: - raise Exception( - "DialogSet.reprompt_dialog(): Can't find A dialog with an id of '%s'." - % self.active_dialog.id - ) - - # Ask dialog to re-prompt if supported - await dialog.reprompt_dialog(self.context, self.active_dialog) + try: + # Check for a dialog on the stack + if self.active_dialog is not None: + # Look up dialog + dialog = await self.find_dialog(self.active_dialog.id) + if not dialog: + raise Exception( + "DialogSet.reprompt_dialog(): Can't find A dialog with an id of '%s'." + % self.active_dialog.id + ) + + # Ask dialog to re-prompt if supported + await dialog.reprompt_dialog(self.context, self.active_dialog) + except Exception as err: + self.__set_exception_context_data(err) + raise async def end_active_dialog(self, reason: DialogReason): instance = self.active_dialog @@ -230,3 +264,23 @@ async def end_active_dialog(self, reason: DialogReason): # Pop dialog off stack self._stack.pop(0) + + def __set_exception_context_data(self, exception: Exception): + if not hasattr(exception, "data"): + exception.data = {} + + if not type(self).__name__ in exception.data: + stack = [] + current_dc = self + + while current_dc is not None: + stack = stack + [x.id for x in current_dc.stack] + current_dc = current_dc.parent + + exception.data[type(self).__name__] = { + "active_dialog": None + if self.active_dialog is None + else self.active_dialog.id, + "parent": None if self.parent is None else self.parent.active_dialog.id, + "stack": self.stack, + }
diff --git a/libraries/botbuilder-dialogs/tests/test_activity_prompt.py b/libraries/botbuilder-dialogs/tests/test_activity_prompt.py --- a/libraries/botbuilder-dialogs/tests/test_activity_prompt.py +++ b/libraries/botbuilder-dialogs/tests/test_activity_prompt.py @@ -215,3 +215,91 @@ async def aux_validator(prompt_context: PromptValidatorContext): step1 = await adapter.send("hello") step2 = await step1.assert_reply("please send an event.") await step2.assert_reply("please send an event.") + + async def test_activity_prompt_onerror_should_return_dialogcontext(self): + # Create ConversationState with MemoryStorage and register the state as middleware. + convo_state = ConversationState(MemoryStorage()) + + # Create a DialogState property, DialogSet and AttachmentPrompt. + dialog_state = convo_state.create_property("dialog_state") + dialogs = DialogSet(dialog_state) + dialogs.add(SimpleActivityPrompt("EventActivityPrompt", validator)) + + async def exec_test(turn_context: TurnContext): + dialog_context = await dialogs.create_context(turn_context) + + results = await dialog_context.continue_dialog() + + if results.status == DialogTurnStatus.Empty: + options = PromptOptions( + prompt=Activity( + type=ActivityTypes.message, text="please send an event." + ) + ) + + try: + await dialog_context.prompt("EventActivityPrompt", options) + await dialog_context.prompt("Non existent id", options) + except Exception as err: + self.assertIsNotNone( + err.data["DialogContext"] # pylint: disable=no-member + ) + self.assertEqual( + err.data["DialogContext"][ # pylint: disable=no-member + "active_dialog" + ], + "EventActivityPrompt", + ) + else: + raise Exception("Should have thrown an error.") + + elif results.status == DialogTurnStatus.Complete: + await turn_context.send_activity(results.result) + + await convo_state.save_changes(turn_context) + + # Initialize TestAdapter. + adapter = TestAdapter(exec_test) + + await adapter.send("hello") + + async def test_activity_replace_dialog_onerror_should_return_dialogcontext(self): + # Create ConversationState with MemoryStorage and register the state as middleware. + convo_state = ConversationState(MemoryStorage()) + + # Create a DialogState property, DialogSet and AttachmentPrompt. + dialog_state = convo_state.create_property("dialog_state") + dialogs = DialogSet(dialog_state) + dialogs.add(SimpleActivityPrompt("EventActivityPrompt", validator)) + + async def exec_test(turn_context: TurnContext): + dialog_context = await dialogs.create_context(turn_context) + + results = await dialog_context.continue_dialog() + + if results.status == DialogTurnStatus.Empty: + options = PromptOptions( + prompt=Activity( + type=ActivityTypes.message, text="please send an event." + ) + ) + + try: + await dialog_context.prompt("EventActivityPrompt", options) + await dialog_context.replace_dialog("Non existent id", options) + except Exception as err: + self.assertIsNotNone( + err.data["DialogContext"] # pylint: disable=no-member + ) + else: + raise Exception("Should have thrown an error.") + + elif results.status == DialogTurnStatus.Complete: + await turn_context.send_activity(results.result) + + await convo_state.save_changes(turn_context) + + # Initialize TestAdapter. + adapter = TestAdapter(exec_test) + + await adapter.send("hello")
[PORT] Emit better error messages for all dialogs
> Port this change from botbuilder-dotnet/master branch: https://github.com/microsoft/botbuilder-dotnet/pull/4153

Fixes #3980

## Description
Change DialogContext to capture exceptions and annotate the exception with contextual information about where the exception happened.

## Specific Changes
* Add try/catch handlers to DialogContext methods to set Exception.Data["dialogContext"] with metadata about the context in which the exception was thrown.

## Testing
* Updated existing OnError testing to test and validate the dialog context data

### Example exception metadata
```
{
  ...,
  "data": {
    "DialogContext": {
      "ActiveDialog": "SetProperty[=`foo`]",
      "Parent": "innerDialog",
      "Stack": [
        "SetProperty[=`foo`]",
        "innerDialog",
        "planningTest"
      ]
    }
  }
}
```

# Changed projects
* Microsoft.Bot.Builder.Dialogs
* Microsoft.Bot.Builder.Dialogs.Tests
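As a rough sketch of how the annotated exception could be inspected on the Python side once this port lands (the handler name and logging calls are illustrative assumptions; the key names come from the patch above, which stores metadata under the DialogContext class name):

```python
async def on_error(context, error: Exception):
    # DialogContext.__set_exception_context_data stores metadata under "DialogContext".
    dialog_info = getattr(error, "data", {}).get("DialogContext")
    if dialog_info:
        print(f"Active dialog: {dialog_info['active_dialog']}")
        print(f"Parent dialog: {dialog_info['parent']}")
        print(f"Dialog stack: {dialog_info['stack']}")
```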
2020-11-20T14:49:41
microsoft/botbuilder-python
1,451
microsoft__botbuilder-python-1451
[ "1450" ]
63b9e9261fe2ed8527b9957d1f519a52e66807f9
diff --git a/libraries/botframework-connector/setup.py b/libraries/botframework-connector/setup.py --- a/libraries/botframework-connector/setup.py +++ b/libraries/botframework-connector/setup.py @@ -12,7 +12,7 @@ "PyJWT==1.5.3", "botbuilder-schema==4.12.0", "adal==1.2.1", - "msal==1.2.0", + "msal==1.6.0", ] root = os.path.abspath(os.path.dirname(__file__))
dependency conflict between botframework 4.11.0 and azure-identity 1.5.0
## Version
4.11 (also happening with 4.10)

## Describe the bug
`botframework-connector == 4.11.0` (current) requires `msal == 1.2.0`
`azure-identity == 1.5.0` (current) requires `msal >=1.6.0,<2.0.0`

This created a dependency conflict where the bot libraries can't coexist in the same program. This used to work a couple of months ago (I bumped into this issue after revisiting some code I had worked on before).

## To Reproduce
This is my `requirements.txt` file; add it and run `pipenv install -r requirements.txt` (versions pinned as shown):
```
botbuilder-core == 4.11
azure-keyvault-secrets
azure-identity == 1.5
botbuilder-ai == 4.11
```

## Expected behavior
Packages should install without conflict.

## Screenshots
Extract from the error message `pipenv install` shows:
```
[pipenv.exceptions.ResolutionFailure]: Warning: Your dependencies could not be resolved. You likely have a mismatch in your sub-dependencies.
  First try clearing your dependency cache with $ pipenv lock --clear, then try the original command again.
  Alternatively, you can use $ pipenv install --skip-lock to bypass this mechanism, then run $ pipenv graph to inspect the situation.
  Hint: try $ pipenv lock --pre if it is a pre-release dependency.
ERROR: ERROR: Could not find a version that matches msal<2.0.0,==1.2.0,>=0.4.1,>=1.6.0
Tried: 0.1.0, 0.1.0, 0.2.0, 0.2.0, 0.3.0, 0.3.0, 0.3.1, 0.3.1, 0.4.0, 0.4.0, 0.4.1, 0.4.1, 0.5.0, 0.5.0, 0.5.1, 0.5.1, 0.6.0, 0.6.0, 0.6.1, 0.6.1, 0.7.0, 0.7.0, 0.8.0, 0.8.0, 0.8.0, 0.9.0, 0.9.0, 1.0.0, 1.0.0, 1.1.0, 1.1.0, 1.2.0, 1.2.0, 1.3.0, 1.3.0, 1.4.0, 1.4.0, 1.4.1, 1.4.1, 1.4.2, 1.4.2, 1.4.3, 1.4.3, 1.5.0, 1.5.0, 1.5.1, 1.5.1, 1.6.0, 1.6.0, 1.7.0, 1.7.0, 1.8.0, 1.8.0
There are incompatible versions in the resolved dependencies.
```

Relevant extract from the output of `pipenv graph` as per the suggestion above:
```
azure-identity==1.5.0
  - msal [required: >=1.6.0,<2.0.0, installed: 1.2.0]
  - msal-extensions [required: ~=0.3.0, installed: 0.3.0]
    - msal [required: >=0.4.1,<2.0.0, installed: 1.2.0]
azure-keyvault-secrets==4.2.0
botbuilder-ai==4.11.0
  - botbuilder-core [required: ==4.11.0, installed: 4.11.0]
    - botframework-connector [required: ==4.11.0, installed: 4.11.0]
      - msal [required: ==1.2.0, installed: 1.2.0]
```

## Additional context
This issue was also reported in [botbuilder-samples repo's issue 2978](https://github.com/microsoft/BotBuilder-Samples/issues/2978)
@axelsrz, could you give us your opinion on this issue?

Thanks for putting this on our radar @asdkant-ey, we will work on updating our supported msal version this milestone.

@axelsrz In the meantime, what can I do? Is there a particular combination of library versions I can use that works without going too far back in time?
2020-12-19T00:31:36
microsoft/botbuilder-python
1,476
microsoft__botbuilder-python-1476
[ "1462" ]
86903ab93ed19ed94fcc3393f99f04b9de60d1b4
diff --git a/libraries/botbuilder-core/botbuilder/core/activity_handler.py b/libraries/botbuilder-core/botbuilder/core/activity_handler.py --- a/libraries/botbuilder-core/botbuilder/core/activity_handler.py +++ b/libraries/botbuilder-core/botbuilder/core/activity_handler.py @@ -381,9 +381,9 @@ async def on_installation_update( # pylint: disable=unused-argument :type turn_context: :class:`botbuilder.core.TurnContext` :returns: A task that represents the work queued to execute """ - if turn_context.activity.action == "add": + if turn_context.activity.action in ("add", "add-upgrade"): return await self.on_installation_update_add(turn_context) - if turn_context.activity.action == "remove": + if turn_context.activity.action in ("remove", "remove-upgrade"): return await self.on_installation_update_remove(turn_context) return
diff --git a/libraries/botbuilder-core/tests/test_activity_handler.py b/libraries/botbuilder-core/tests/test_activity_handler.py --- a/libraries/botbuilder-core/tests/test_activity_handler.py +++ b/libraries/botbuilder-core/tests/test_activity_handler.py @@ -268,7 +268,23 @@ async def test_on_installation_update_add(self): assert bot.record[0] == "on_installation_update" assert bot.record[1] == "on_installation_update_add" - async def test_on_installation_update_add_remove(self): + async def test_on_installation_update_add_upgrade(self): + activity = Activity( + type=ActivityTypes.installation_update, action="add-upgrade" + ) + + adapter = TestInvokeAdapter() + turn_context = TurnContext(adapter, activity) + + # Act + bot = TestingActivityHandler() + await bot.on_turn(turn_context) + + assert len(bot.record) == 2 + assert bot.record[0] == "on_installation_update" + assert bot.record[1] == "on_installation_update_add" + + async def test_on_installation_update_remove(self): activity = Activity(type=ActivityTypes.installation_update, action="remove") adapter = TestInvokeAdapter() @@ -282,6 +298,22 @@ async def test_on_installation_update_add_remove(self): assert bot.record[0] == "on_installation_update" assert bot.record[1] == "on_installation_update_remove" + async def test_on_installation_update_remove_upgrade(self): + activity = Activity( + type=ActivityTypes.installation_update, action="remove-upgrade" + ) + + adapter = TestInvokeAdapter() + turn_context = TurnContext(adapter, activity) + + # Act + bot = TestingActivityHandler() + await bot.on_turn(turn_context) + + assert len(bot.record) == 2 + assert bot.record[0] == "on_installation_update" + assert bot.record[1] == "on_installation_update_remove" + async def test_healthcheck(self): activity = Activity(type=ActivityTypes.invoke, name="healthcheck",)
Support for new installation action types (Python) See [parent](https://github.com/microsoft/botframework-sdk/issues/6139)
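A minimal sketch of consuming the new actions through ActivityHandler, based on the dispatch change in the patch above (the bot class name and reply text are illustrative assumptions):

```python
from botbuilder.core import ActivityHandler, TurnContext


class InstallBot(ActivityHandler):
    async def on_installation_update_add(self, turn_context: TurnContext):
        # Invoked for both "add" and "add-upgrade" actions.
        await turn_context.send_activity("Thanks for installing or upgrading me!")

    async def on_installation_update_remove(self, turn_context: TurnContext):
        # Invoked for both "remove" and "remove-upgrade" actions.
        pass
```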
2021-01-26T20:04:11
microsoft/botbuilder-python
1,486
microsoft__botbuilder-python-1486
[ "1463" ]
fbd73deb3b77db856d2ad7ff2dde6850123f0426
diff --git a/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py b/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py --- a/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py +++ b/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py @@ -23,6 +23,8 @@ O365ConnectorCardActionQuery, TaskModuleRequest, TaskModuleResponse, + TabRequest, + TabSubmit, ) from botframework.connector import Channels from ..serializer_helper import deserializer_helper @@ -163,6 +165,22 @@ async def on_invoke_activity(self, turn_context: TurnContext) -> InvokeResponse: ) ) + if turn_context.activity.name == "tab/fetch": + return self._create_invoke_response( + await self.on_teams_tab_fetch( + turn_context, + deserializer_helper(TabRequest, turn_context.activity.value), + ) + ) + + if turn_context.activity.name == "tab/submit": + return self._create_invoke_response( + await self.on_teams_tab_submit( + turn_context, + deserializer_helper(TabSubmit, turn_context.activity.value), + ) + ) + return await super().on_invoke_activity(turn_context) except _InvokeResponseException as invoke_exception: @@ -466,6 +484,32 @@ async def on_teams_task_module_submit( # pylint: disable=unused-argument """ raise _InvokeResponseException(status_code=HTTPStatus.NOT_IMPLEMENTED) + async def on_teams_tab_fetch( # pylint: disable=unused-argument + self, turn_context: TurnContext, tab_request: TabRequest + ): + """ + Override this in a derived class to provide logic for when a tab is fetched. + + :param turn_context: A context object for this turn. + :param tab_request: The tab invoke request value payload. + + :returns: A Tab Response for the request. + """ + raise _InvokeResponseException(status_code=HTTPStatus.NOT_IMPLEMENTED) + + async def on_teams_tab_submit( # pylint: disable=unused-argument + self, turn_context: TurnContext, tab_submit: TabSubmit + ): + """ + Override this in a derived class to provide logic for when a tab is submitted. + + :param turn_context: A context object for this turn. + :param tab_submit: The tab submit invoke request value payload. + + :returns: A Tab Response for the request. + """ + raise _InvokeResponseException(status_code=HTTPStatus.NOT_IMPLEMENTED) + async def on_conversation_update_activity(self, turn_context: TurnContext): """ Invoked when a conversation update activity is received from the channel. 
diff --git a/libraries/botbuilder-schema/botbuilder/schema/teams/__init__.py b/libraries/botbuilder-schema/botbuilder/schema/teams/__init__.py --- a/libraries/botbuilder-schema/botbuilder/schema/teams/__init__.py +++ b/libraries/botbuilder-schema/botbuilder/schema/teams/__init__.py @@ -62,6 +62,15 @@ from ._models_py3 import TeamsMeetingParticipant from ._models_py3 import MeetingParticipantInfo from ._models_py3 import CacheInfo +from ._models_py3 import TabContext +from ._models_py3 import TabRequest +from ._models_py3 import TabResponseCard +from ._models_py3 import TabResponseCards +from ._models_py3 import TabResponsePayload +from ._models_py3 import TabSubmit +from ._models_py3 import TabSubmitData +from ._models_py3 import TabSuggestedActions +from ._models_py3 import TaskModuleCardResponse __all__ = [ "AppBasedLinkQuery", @@ -125,4 +134,13 @@ "TeamsMeetingParticipant", "MeetingParticipantInfo", "CacheInfo", + "TabContext", + "TabRequest", + "TabResponseCard", + "TabResponseCards", + "TabResponsePayload", + "TabSubmit", + "TabSubmitData", + "TabSuggestedActions", + "TaskModuleCardResponse", ] diff --git a/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py b/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py --- a/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py +++ b/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py @@ -10,6 +10,27 @@ ) +class TabEntityContext(Model): + """ + Current TabRequest entity context, or 'tabEntityId'. + + :param tab_entity_id: Gets or sets the entity id of the tab. + :type tab_entity_id: str + """ + + _attribute_map = { + "tab_entity_id": {"key": "tabEntityId", "type": "str"}, + } + + def __init__(self, *, tab_entity_id=None, **kwargs) -> None: + super(TabEntityContext, self).__init__(**kwargs) + self.tab_entity_id = tab_entity_id + self._custom_init() + + def _custom_init(self): + return + + class TaskModuleRequest(Model): """Task module invoke request value payload. @@ -18,17 +39,24 @@ class TaskModuleRequest(Model): :param context: Current user context, i.e., the current theme :type context: ~botframework.connector.teams.models.TaskModuleRequestContext + :param tab_entity_context: Gets or sets current tab request context. + :type tab_entity_context: + ~botframework.connector.teams.models.TabEntityContext """ _attribute_map = { "data": {"key": "data", "type": "object"}, "context": {"key": "context", "type": "TaskModuleRequestContext"}, + "tab_entity_context": {"key": "tabContext", "type": "TabEntityContext"}, } - def __init__(self, *, data=None, context=None, **kwargs) -> None: + def __init__( + self, *, data=None, context=None, tab_entity_context=None, **kwargs + ) -> None: super(TaskModuleRequest, self).__init__(**kwargs) self.data = data self.context = context + self.tab_entity_context = tab_entity_context class AppBasedLinkQuery(Model): @@ -2058,3 +2086,266 @@ def __init__( self.user = user self.meeting = meeting self.conversation = conversation + + +class TabContext(Model): + """ + Current tab request context, i.e., the current theme. + + :param theme: Gets or sets the current user's theme. + :type theme: str + """ + + _attribute_map = { + "theme": {"key": "theme", "type": "str"}, + } + + def __init__(self, *, theme=None, **kwargs) -> None: + super(TabContext, self).__init__(**kwargs) + self.theme = theme + self._custom_init() + + def _custom_init(self): + return + + +class TabRequest(Model): + """ + Invoke ('tab/fetch') request value payload. 
+ + :param tab_entity_context: Gets or sets current tab entity request context. + :type tab_entity_context: + ~botframework.connector.teams.models.TabEntityContext + :param context: Gets or sets current tab entity request context. + :type context: + ~botframework.connector.teams.models.TabContext + :param state: Gets or sets state, which is the magic code for OAuth Flow. + :type state: str + """ + + _attribute_map = { + "tab_entity_context": {"key": "tabContext", "type": "TabEntityContext"}, + "context": {"key": "context", "type": "TabContext"}, + "state": {"key": "state", "type": "str"}, + } + + def __init__( + self, *, tab_entity_context=None, context=None, state=None, **kwargs + ) -> None: + super(TabRequest, self).__init__(**kwargs) + self.tab_entity_context = tab_entity_context + self.context = context + self.state = state + self._custom_init() + + def _custom_init(self): + return + + +class TabResponseCard(Model): + """ + Envelope for cards for a Tab request. + + :param card: Gets or sets adaptive card for this card tab response. + :type card: object + """ + + _attribute_map = { + "card": {"key": "card", "type": "object"}, + } + + def __init__(self, *, card=None, **kwargs) -> None: + super(TabResponseCard, self).__init__(**kwargs) + self.card = card + self._custom_init() + + def _custom_init(self): + return + + +class TabResponseCards(Model): + """ + Envelope for cards for a TabResponse. + + :param cards: Gets or sets adaptive card for this card tab response. + :type cards: + list[ ~botframework.connector.teams.models.TabResponseCard] + """ + + _attribute_map = { + "cards": {"key": "cards", "type": "[TabResponseCard]"}, + } + + def __init__(self, *, cards=None, **kwargs) -> None: + super(TabResponseCards, self).__init__(**kwargs) + self.cards = cards + self._custom_init() + + def _custom_init(self): + return + + +class TabResponsePayload(Model): + """ + Initializes a new instance of the TabResponsePayload class. + + :param type: Gets or sets choice of action options when responding to the + tab/fetch message. Possible values include: 'continue', 'auth' or 'silentAuth' + :type type: str + :param value: Gets or sets the TabResponseCards when responding to + tab/fetch activity with type of 'continue'. + :type value: TabResponseCards + :param suggested_actions: Gets or sets the Suggested Actions for this card tab. + :type suggested_actions: TabSuggestedActions + """ + + _attribute_map = { + "type": {"key": "type", "type": "str"}, + "value": {"key": "value", "type": "TabResponseCards"}, + "suggested_actions": {"key": "suggestedActions", "type": "TabSuggestedActions"}, + } + + def __init__( + self, *, type=None, value=None, suggested_actions=None, **kwargs + ) -> None: + super(TabResponsePayload, self).__init__(**kwargs) + self.type = type + self.value = value + self.suggested_actions = suggested_actions + self._custom_init() + + def _custom_init(self): + return + + +class TabSumit(Model): + """ + Invoke ('tab/submit') request value payload. + + :param tab_entity_context: Gets or sets current tab entity request context. + :type tab_entity_context: + ~botframework.connector.teams.models.TabEntityContext + :param context: Gets or sets current tab entity request context. + :type context: + ~botframework.connector.teams.models.TabContext + :param data: User input data. Free payload containing properties of key-value pairs. 
+ :type data: + ~botframework.connector.teams.models.TabSubmitData + """ + + _attribute_map = { + "tab_entity_context": {"key": "tabContext", "type": "TabEntityContext"}, + "context": {"key": "context", "type": "TabContext"}, + "data": {"key": "data", "type": "TabSubmitData"}, + } + + def __init__( + self, *, tab_entity_context=None, context=None, data=None, **kwargs + ) -> None: + super(TabSumit, self).__init__(**kwargs) + self.tab_entity_context = tab_entity_context + self.context = context + self.data = data + self._custom_init() + + def _custom_init(self): + return + + +class TabSubmitData(Model): + """ + Invoke ('tab/submit') request value payload data. + + :param type: Currently, 'tab/submit'. + :type type: str + :param properties: Gets or sets properties that are not otherwise defined by the TabSubmit + type but that might appear in the serialized REST JSON object. + :type properties: object + """ + + _attribute_map = { + "type": {"key": "type", "type": "str"}, + "properties": {"key": "properties", "type": "{object}"}, + } + + def __init__(self, *, type=None, properties=None, **kwargs) -> None: + super(TabSubmitData, self).__init__(**kwargs) + self.type = type + self.properties = properties + self._custom_init() + + def _custom_init(self): + return + + +class TabSubmit(Model): + """ + Initializes a new instance of the TabSubmit class. + + :param tab_entity_context: Gets or sets current tab entity request context. + :type tab_entity_context: ~botframework.connector.teams.models.TabEntityContext + :param context: Gets or sets current user context, i.e., the current theme. + :type context: ~botframework.connector.teams.models.TabContext + :param data: User input data. Free payload containing properties of key-value pairs. + :type data: ~botframework.connector.teams.models.TabSubmitData + """ + + _attribute_map = { + "tab_entity_context": {"key": "tabContext", "type": "TabEntityContext"}, + "context": {"key": "context", "type": "TabContext"}, + "data": {"key": "data", "type": "TabSubmitData"}, + } + + def __init__( + self, *, tab_entity_context=None, context=None, data=None, **kwargs + ) -> None: + super(TabSubmit, self).__init__(**kwargs) + self.tab_entity_context = tab_entity_context + self.context = context + self.data = data + self._custom_init() + + def _custom_init(self): + return + + +class TabSuggestedActions(Model): + """ + Tab SuggestedActions (Only when type is 'auth' or 'silentAuth'). + + :param actions: Gets or sets adaptive card for this card tab response. + :type actions: list[~botframework.connector.models.CardAction] + """ + + _attribute_map = { + "actions": {"key": "actions", "type": "[CardAction]"}, + } + + def __init__(self, *, actions=None, **kwargs) -> None: + super(TabSuggestedActions, self).__init__(**kwargs) + self.actions = actions + self._custom_init() + + def _custom_init(self): + return + + +class TaskModuleCardResponse(TaskModuleResponseBase): + """ + Tab Response to 'task/submit' from a tab. + + :param value: The JSON for the Adaptive cards to appear in the tab. + :type value: ~botframework.connector.teams.models.TabResponse + """ + + _attribute_map = { + "value": {"key": "value", "type": "TabResponse"}, + } + + def __init__(self, *, value=None, **kwargs) -> None: + super(TaskModuleCardResponse, self).__init__("continue", **kwargs) + self.value = value + self._custom_init() + + def _custom_init(self): + return
diff --git a/libraries/botbuilder-core/tests/teams/test_teams_activity_handler.py b/libraries/botbuilder-core/tests/teams/test_teams_activity_handler.py --- a/libraries/botbuilder-core/tests/teams/test_teams_activity_handler.py +++ b/libraries/botbuilder-core/tests/teams/test_teams_activity_handler.py @@ -27,6 +27,9 @@ TaskModuleRequestContext, TeamInfo, TeamsChannelAccount, + TabRequest, + TabSubmit, + TabContext, ) from botframework.connector import Channels from simple_adapter import SimpleAdapter @@ -294,6 +297,18 @@ async def on_teams_task_module_submit( # pylint: disable=unused-argument turn_context, task_module_request ) + async def on_teams_tab_fetch( + self, turn_context: TurnContext, tab_request: TabRequest + ): + self.record.append("on_teams_tab_fetch") + return await super().on_teams_tab_fetch(turn_context, tab_request) + + async def on_teams_tab_submit( + self, turn_context: TurnContext, tab_submit: TabSubmit + ): + self.record.append("on_teams_tab_submit") + return await super().on_teams_tab_submit(turn_context, tab_submit) + class NotImplementedAdapter(BotAdapter): async def delete_activity( @@ -988,6 +1003,44 @@ async def test_on_teams_task_module_submit(self): assert bot.record[0] == "on_invoke_activity" assert bot.record[1] == "on_teams_task_module_submit" + async def test_on_teams_tab_fetch(self): + # Arrange + activity = Activity( + type=ActivityTypes.invoke, + name="tab/fetch", + value={"data": {"key": "value"}, "context": TabContext().serialize(),}, + ) + + turn_context = TurnContext(SimpleAdapter(), activity) + + # Act + bot = TestingTeamsActivityHandler() + await bot.on_turn(turn_context) + + # Assert + assert len(bot.record) == 2 + assert bot.record[0] == "on_invoke_activity" + assert bot.record[1] == "on_teams_tab_fetch" + + async def test_on_teams_tab_submit(self): + # Arrange + activity = Activity( + type=ActivityTypes.invoke, + name="tab/submit", + value={"data": {"key": "value"}, "context": TabContext().serialize(),}, + ) + + turn_context = TurnContext(SimpleAdapter(), activity) + + # Act + bot = TestingTeamsActivityHandler() + await bot.on_turn(turn_context) + + # Assert + assert len(bot.record) == 2 + assert bot.record[0] == "on_invoke_activity" + assert bot.record[1] == "on_teams_tab_submit" + async def test_on_end_of_conversation_activity(self): activity = Activity(type=ActivityTypes.end_of_conversation)
Adaptive Card Tabs (Python) See [parent](https://github.com/microsoft/botframework-sdk/issues/6138)
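As a small illustration of the new tab models introduced by the patch above, a 'continue' tab payload could be assembled roughly as follows; the Adaptive Card JSON is an illustrative assumption, and the exact response envelope that on_teams_tab_fetch must return is not shown in this excerpt:

```python
from botbuilder.schema.teams import (
    TabResponseCard,
    TabResponseCards,
    TabResponsePayload,
)

# Illustrative Adaptive Card JSON; a real card would carry actual body elements.
card_json = {"type": "AdaptiveCard", "version": "1.0", "body": []}

payload = TabResponsePayload(
    type="continue",
    value=TabResponseCards(cards=[TabResponseCard(card=card_json)]),
)
```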
2021-02-01T08:21:50
microsoft/botbuilder-python
1,487
microsoft__botbuilder-python-1487
[ "1210" ]
1fe7513e2f1809066f5e1a8109505afb5815f7f3
diff --git a/libraries/botframework-connector/botframework/connector/auth/jwt_token_extractor.py b/libraries/botframework-connector/botframework/connector/auth/jwt_token_extractor.py --- a/libraries/botframework-connector/botframework/connector/auth/jwt_token_extractor.py +++ b/libraries/botframework-connector/botframework/connector/auth/jwt_token_extractor.py @@ -125,10 +125,16 @@ def __init__(self, url): self.last_updated = datetime.min async def get(self, key_id: str): - # If keys are more than 5 days old, refresh them - if self.last_updated < (datetime.now() - timedelta(days=5)): + # If keys are more than 1 day old, refresh them + if self.last_updated < (datetime.now() - timedelta(days=1)): await self._refresh() - return self._find(key_id) + + key = self._find(key_id) + if not key and self.last_updated < (datetime.now() - timedelta(hours=1)): + # Refresh the cache if a key is not found (max once per hour) + await self._refresh() + key = self._find(key_id) + return key async def _refresh(self): response = requests.get(self.url)
OpenIdMetadata signing keys should refresh every 24 hours, and once per hour if a key is missing
## Describe the bug
Every 24 hours, and when a cert is cycled (a key is not found in the cache), the OpenIdMetadata cache within the SDK should gracefully refresh.

JavaScript implementation reference: https://github.com/microsoft/botbuilder-js/pull/2466

Python code to change: https://github.com/microsoft/botbuilder-python/blob/master/libraries/botframework-connector/botframework/connector/auth/jwt_token_extractor.py#L129

[bug]
2021-02-01T15:09:25
microsoft/botbuilder-python
1,488
microsoft__botbuilder-python-1488
[ "1416" ]
8b022921af5bc4e46139cb4816939d373e70dce1
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_extensions.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_extensions.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_extensions.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_extensions.py @@ -40,11 +40,6 @@ async def run_dialog( # No dialogs to cancel, just return. return - remote_cancel_text = "Skill was canceled through an EndOfConversation activity from the parent." - await turn_context.send_trace_activity( - f"Extension {Dialog.__name__}.run_dialog", label=remote_cancel_text, - ) - # Send cancellation message to the dialog to ensure all the parents are canceled # in the right order. await dialog_context.cancel_all_dialogs() @@ -73,15 +68,6 @@ async def run_dialog( or result.status == DialogTurnStatus.Cancelled ): if DialogExtensions.__send_eoc_to_parent(turn_context): - end_message_text = ( - f"Dialog {dialog.id} has **completed**. Sending EndOfConversation." - ) - await turn_context.send_trace_activity( - f"Extension {Dialog.__name__}.run_dialog", - label=end_message_text, - value=result.result, - ) - activity = Activity( type=ActivityTypes.end_of_conversation, value=result.result, diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_manager.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_manager.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_manager.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_manager.py @@ -306,12 +306,6 @@ async def handle_skill_on_turn( # Handle remote cancellation request from parent. active_dialog_context = self.get_active_dialog_context(dialog_context) - remote_cancel_text = "Skill was canceled through an EndOfConversation activity from the parent." - await turn_context.send_trace_activity( - f"{self.__class__.__name__}.on_turn_async()", - label=f"{remote_cancel_text}", - ) - # Send cancellation message to the top dialog in the stack to ensure all the parents are canceled in the # right order. return await active_dialog_context.cancel_all_dialogs(True) @@ -333,23 +327,11 @@ async def handle_skill_on_turn( turn_result = await dialog_context.continue_dialog() if turn_result.status == DialogTurnStatus.Empty: # restart root dialog - start_message_text = f"Starting {self._root_dialog_id}." - await turn_context.send_trace_activity( - f"{self.__class__.__name__}.handle_skill_on_turn_async()", - label=f"{start_message_text}", - ) turn_result = await dialog_context.begin_dialog(self._root_dialog_id) await DialogManager.send_state_snapshot_trace(dialog_context, "Skill State") if self.should_send_end_of_conversation_to_parent(turn_context, turn_result): - end_message_text = f"Dialog {self._root_dialog_id} has **completed**. Sending EndOfConversation." - await turn_context.send_trace_activity( - f"{self.__class__.__name__}.handle_skill_on_turn_async()", - label=f"{end_message_text}", - value=turn_result.result, - ) - # Send End of conversation at the end. 
activity = Activity( type=ActivityTypes.end_of_conversation, diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py @@ -49,11 +49,6 @@ async def begin_dialog(self, dialog_context: DialogContext, options: object = No """ dialog_args = self._validate_begin_dialog_args(options) - await dialog_context.context.send_trace_activity( - f"{SkillDialog.__name__}.BeginDialogAsync()", - label=f"Using activity of type: {dialog_args.activity.type}", - ) - # Create deep clone of the original activity to avoid altering it before forwarding it. skill_activity: Activity = deepcopy(dialog_args.activity) @@ -90,19 +85,9 @@ async def continue_dialog(self, dialog_context: DialogContext): if not self._on_validate_activity(dialog_context.context.activity): return self.end_of_turn - await dialog_context.context.send_trace_activity( - f"{SkillDialog.__name__}.continue_dialog()", - label=f"ActivityType: {dialog_context.context.activity.type}", - ) - # Handle EndOfConversation from the skill (this will be sent to the this dialog by the SkillHandler if # received from the Skill) if dialog_context.context.activity.type == ActivityTypes.end_of_conversation: - await dialog_context.context.send_trace_activity( - f"{SkillDialog.__name__}.continue_dialog()", - label=f"Got {ActivityTypes.end_of_conversation}", - ) - return await dialog_context.end_dialog( dialog_context.context.activity.value ) @@ -156,10 +141,6 @@ async def end_dialog( ): # Send of of conversation to the skill if the dialog has been cancelled. if reason in (DialogReason.CancelCalled, DialogReason.ReplaceCalled): - await context.send_trace_activity( - f"{SkillDialog.__name__}.end_dialog()", - label=f"ActivityType: {context.activity.type}", - ) activity = Activity(type=ActivityTypes.end_of_conversation) # Apply conversation reference and common properties from incoming activity before sending.
diff --git a/libraries/botbuilder-dialogs/tests/test_dialog_manager.py b/libraries/botbuilder-dialogs/tests/test_dialog_manager.py --- a/libraries/botbuilder-dialogs/tests/test_dialog_manager.py +++ b/libraries/botbuilder-dialogs/tests/test_dialog_manager.py @@ -300,35 +300,6 @@ async def test_skill_should_return_empty_on_reprompt_with_no_dialog(self): DialogTurnStatus.Empty, ) - async def test_trace_skill_state(self): - SimpleComponentDialog.dm_turn_result = None - dialog = SimpleComponentDialog() - - def assert_is_trace(activity, description): # pylint: disable=unused-argument - assert activity.type == ActivityTypes.trace - - def assert_is_trace_and_label(activity, description): - assert_is_trace(activity, description) - assert activity.label == "Skill State" - - test_flow = await SimpleComponentDialog.create_test_flow( - dialog, SkillFlowTestCase.leaf_skill, True - ) - - step1 = await test_flow.send("Hi") - step2 = await step1.assert_reply(assert_is_trace) - step2 = await step2.assert_reply("Hello, what is your name?") - step3 = await step2.assert_reply(assert_is_trace_and_label) - step4 = await step3.send("SomeName") - step5 = await step4.assert_reply("Hello SomeName, nice to meet you!") - step6 = await step5.assert_reply(assert_is_trace_and_label) - await step6.assert_reply(assert_is_trace) - - self.assertEqual( - SimpleComponentDialog.dm_turn_result.turn_result.status, - DialogTurnStatus.Complete, - ) - async def test_trace_bot_state(self): SimpleComponentDialog.dm_turn_result = None dialog = SimpleComponentDialog()
PORT: The SDK is over-verbose in generating TraceActivity records and it can't be turned off.
Remove extra trace activities: https://github.com/microsoft/botbuilder-dotnet/pull/4819 (keeping only the state traces)
2021-02-01T16:18:54
microsoft/botbuilder-python
1,504
microsoft__botbuilder-python-1504
[ "1502" ]
b458369a1d9e0da5b611534023b65eb7ccc6563e
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_manager.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_manager.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_manager.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_manager.py @@ -16,7 +16,7 @@ DialogStateManager, DialogStateManagerConfiguration, ) -from botbuilder.schema import Activity, ActivityTypes +from botbuilder.schema import Activity, ActivityTypes, EndOfConversationCodes from botframework.connector.auth import ( AuthenticationConstants, ClaimsIdentity, @@ -355,6 +355,9 @@ async def handle_skill_on_turn( type=ActivityTypes.end_of_conversation, value=turn_result.result, locale=turn_context.activity.locale, + code=EndOfConversationCodes.completed_successfully + if turn_result.status == DialogTurnStatus.Complete + else EndOfConversationCodes.user_cancelled, ) await turn_context.send_activity(activity)
diff --git a/libraries/botbuilder-dialogs/tests/test_dialog_manager.py b/libraries/botbuilder-dialogs/tests/test_dialog_manager.py --- a/libraries/botbuilder-dialogs/tests/test_dialog_manager.py +++ b/libraries/botbuilder-dialogs/tests/test_dialog_manager.py @@ -38,6 +38,7 @@ ActivityTypes, ChannelAccount, ConversationAccount, + EndOfConversationCodes, InputHints, ) from botframework.connector.auth import AuthenticationConstants, ClaimsIdentity @@ -237,6 +238,10 @@ async def test_handles_bot_and_skills(self): SimpleComponentDialog.eoc_sent.type, ActivityTypes.end_of_conversation, ) + self.assertEqual( + SimpleComponentDialog.eoc_sent.code, + EndOfConversationCodes.completed_successfully, + ) self.assertEqual(SimpleComponentDialog.eoc_sent.value, "SomeName") else: self.assertIsNone(
DialogManager does not return EoC code when a dialog ends (python) See [parent issue](https://github.com/microsoft/botframework-sdk/issues/6186)
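For context, a rough sketch of how a consuming root bot might read the code once the skill's DialogManager sets it; the handler follows the SDK's ActivityHandler naming, but the branch logic and reply text are illustrative assumptions:

```python
from botbuilder.core import ActivityHandler, TurnContext
from botbuilder.schema import EndOfConversationCodes


class RootBot(ActivityHandler):
    async def on_end_of_conversation_activity(self, turn_context: TurnContext):
        # With the patch above, the skill populates activity.code on EndOfConversation.
        if turn_context.activity.code == EndOfConversationCodes.completed_successfully:
            await turn_context.send_activity("The skill completed its dialog.")
        else:
            await turn_context.send_activity("The skill dialog did not complete.")
```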
2021-02-05T05:20:05
microsoft/botbuilder-python
1,507
microsoft__botbuilder-python-1507
[ "1500" ]
354444ef38c338f642ec41b51d18da6307b42172
diff --git a/libraries/botbuilder-integration-aiohttp/setup.py b/libraries/botbuilder-integration-aiohttp/setup.py --- a/libraries/botbuilder-integration-aiohttp/setup.py +++ b/libraries/botbuilder-integration-aiohttp/setup.py @@ -9,6 +9,7 @@ "botbuilder-schema==4.12.0", "botframework-connector==4.12.0", "botbuilder-core==4.12.0", + "yarl<=1.4.2", "aiohttp==3.6.2", ]
Python 81.skills-skilldialog throwing error: [on_turn_error] unhandled error: Cannot deserialize content-type: text/plain ## Sample information 1. Sample type: \samples\ 2. Sample language: python 3. Sample name: 81.skills-skilldialog ## Describe the bug When you run the sample as per the instructions, the skill bot is throwing the following error: ======== Running on http://localhost:39783 ======== (Press CTRL+C to quit) [on_turn_error] unhandled error: Cannot deserialize content-type: text/plain Traceback (most recent call last): File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/bot_adapter.py", line 128, in run_pipeline context, callback File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/middleware_set.py", line 69, in receive_activity_with_status return await self.receive_activity_internal(context, callback) File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/middleware_set.py", line 79, in receive_activity_internal return await callback(context) File "/Users/tim/Documents/Sourcetree/BotBuilderSamples/samples/python/81.skills-skilldialog/dialog-skill-bot/bots/skill_bot.py", line 21, in on_turn self._conversation_state.create_property("DialogState"), File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/dialog_extensions.py", line 68, in run_dialog result = await dialog_context.begin_dialog(dialog.id) File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/dialog_context.py", line 91, in begin_dialog return await dialog.begin_dialog(self, options) File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/component_dialog.py", line 67, in begin_dialog turn_result = await self.on_begin_dialog(inner_dc, options) File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/component_dialog.py", line 221, in on_begin_dialog return await inner_dc.begin_dialog(self.initial_dialog_id, options) File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/dialog_context.py", line 91, in begin_dialog return await dialog.begin_dialog(self, options) File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/waterfall_dialog.py", line 65, in begin_dialog return await self.run_step(dialog_context, 0, DialogReason.BeginCalled, None) File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/waterfall_dialog.py", line 156, in run_step return await self.on_step(step_context) File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/dialogs/waterfall_dialog.py", line 132, in on_step return await self._steps[step_context.index](step_context) File "/Users/tim/Documents/Sourcetree/BotBuilderSamples/samples/python/81.skills-skilldialog/dialog-skill-bot/dialogs/activity_router_dialog.py", line 50, in process_activity return await self._on_event_activity(step_context) File "/Users/tim/Documents/Sourcetree/BotBuilderSamples/samples/python/81.skills-skilldialog/dialog-skill-bot/dialogs/activity_router_dialog.py", line 77, in _on_event_activity return await self._begin_get_weather(step_context) File "/Users/tim/Documents/Sourcetree/BotBuilderSamples/samples/python/81.skills-skilldialog/dialog-skill-bot/dialogs/activity_router_dialog.py", line 156, in _begin_get_weather get_weather_message, get_weather_message, InputHints.ignoring_input, File 
"/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/turn_context.py", line 174, in send_activity result = await self.send_activities([activity_or_text]) File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/turn_context.py", line 226, in send_activities return await self._emit(self._on_send_activities, output, logic()) File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/turn_context.py", line 304, in _emit return await logic File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/turn_context.py", line 221, in logic responses = await self.adapter.send_activities(self, output) File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/bot_framework_adapter.py", line 729, in send_activities raise error File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botbuilder/core/bot_framework_adapter.py", line 715, in send_activities activity.conversation.id, activity.reply_to_id, activity File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/botframework/connector/aio/operations_async/_conversations_operations_async.py", line 529, in reply_to_activity request, stream=False, **operation_config File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/async_client.py", line 115, in async_send pipeline_response = await self.config.pipeline.run(request, **kwargs) File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/async_abc.py", line 159, in run return await first_node.send(pipeline_request, **kwargs) # type: ignore File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/async_abc.py", line 79, in send response = await self.next.send(request, **kwargs) # type: ignore File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/async_requests.py", line 106, in send return await self.next.send(request, **kwargs) File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/async_abc.py", line 84, in send self._policy.on_response(request, response, **kwargs) File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/universal.py", line 252, in on_response http_response.headers File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/universal.py", line 226, in deserialize_from_http_generics return cls.deserialize_from_text(body_bytes, content_type) File "/Users/tim/.pyenv/versions/bot379/lib/python3.7/site-packages/msrest/pipeline/universal.py", line 203, in deserialize_from_text raise DeserializationError("Cannot deserialize content-type: {}".format(content_type)) msrest.exceptions.DeserializationError: Cannot deserialize content-type: text/plain ## To Reproduce Steps to reproduce the behavior: 1. Run the root & skill bots as per the instructions from the sample readme 2. Start the bot framework emulator & connect 3. Choose the DialogSkillBot 4. Enter activity 3 ## Expected behavior Error not returned
As requested by @v-kydela I tried commenting out the two calls to send_activity_trace in the skillbot & re-running the test - [here](https://github.com/microsoft/BotBuilder-Samples/blob/main/samples/python/81.skills-skilldialog/dialog-skill-bot/dialogs/activity_router_dialog.py#L44-L47) & [here](https://github.com/microsoft/BotBuilder-Samples/blob/main/samples/python/81.skills-skilldialog/dialog-skill-bot/skill_adapter_with_error_handler.py#L57-L62). The error still happens. Hello @wonboyn, I couldn't reproduce this issue using v4.11.0 of the botbuilder packages and the latest version of this sample. I tried with clean installations of Python 3.7.9 and 3.8.7 With those configurations, the sample runs as expected. But I noticed that there's an error in the requirements.txt given that it doesn't include neither botbuilder-dialogs nor botbuilder-ai, which are necessary for the skill bot to run. There is a possibility that you might've been running the sample with those packages outdated. Could you please double check the version of all botbuilder packages that you used? Please let me know if you keep encountering this problem. Hi @axelsrz I am using python v3.7.9 & botbuilder v4.11.0 - albeit with pyenv: % pip list Package Version ------------------------------------- --------- adal 1.2.1 aiohttp 3.6.2 aiounittest 1.4.0 async-timeout 3.0.1 attrs 20.3.0 azure-cognitiveservices-language-luis 0.2.0 azure-common 1.1.26 Babel 2.7.0 botbuilder-ai 4.11.0 botbuilder-core 4.11.0 botbuilder-dialogs 4.11.0 botbuilder-integration-aiohttp 4.11.0 botbuilder-schema 4.11.0 botframework-connector 4.11.0 certifi 2020.12.5 cffi 1.14.4 chardet 3.0.4 cryptography 2.8 datatypes-date-time 1.0.0a2 datedelta 1.3 emoji 0.6.0 grapheme 0.6.0 idna 2.10 isodate 0.6.0 jsonpickle 1.2 msal 1.2.0 msrest 0.6.10 msrestazure 0.6.4 multidict 4.7.6 multipledispatch 0.6.0 oauthlib 3.1.0 pip 20.1.1 pycparser 2.20 PyJWT 1.5.3 python-dateutil 2.8.1 pytz 2020.4 recognizers-text 1.0.2a2 recognizers-text-choice 1.0.2a2 recognizers-text-date-time 1.0.2a2 recognizers-text-number 1.0.2a2 recognizers-text-number-with-unit 1.0.2a2 regex 2019.8.19 requests 2.23.0 requests-oauthlib 1.3.0 setuptools 47.1.0 six 1.15.0 typing-extensions 3.7.4.3 urllib3 1.25.11 wrapt 1.12.1 yarl 1.6.3 Thanks for the reply @wonboyn. Running the scenarios again I realized that in my previous test I was using Python 3.7.9 and 3.8.7 only for the Skill bot, the parent was always running in 3.8.7. After testing running the parent in 3.7.9 I was able to reproduce. This looks like a product issue (I will transfer this to the python repo in the following days) and we should fix it in the course of our current release cycle - 4.12.0. In the meantime, a workaround would be to run at least your parent bot in 3.8 (you can leave the skill in 3.7 or 3.8). Let me know if you have further questions. @axelsrz Is this still on your radar?
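Since the fix above is a dependency pin (yarl<=1.4.2 alongside aiohttp 3.6.2), a small startup check along these lines can confirm the pin is in effect; this is a hypothetical helper, not part of the sample or the SDK.

```python
# A minimal sketch (hypothetical helper, not part of the sample or SDK) that
# fails fast if the installed yarl is newer than the pin added in setup.py.
import pkg_resources


def assert_yarl_pinned() -> None:
    installed = pkg_resources.get_distribution("yarl").version
    if pkg_resources.parse_version(installed) > pkg_resources.parse_version("1.4.2"):
        raise RuntimeError(
            f"yarl {installed} is newer than the yarl<=1.4.2 pin used with aiohttp 3.6.2"
        )


assert_yarl_pinned()
```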
2021-02-06T01:59:39
microsoft/botbuilder-python
1,523
microsoft__botbuilder-python-1523
[ "1200" ]
9bf779e8b826b8f969656b2997685ab6a845578c
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_container.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_container.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_container.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_container.py @@ -4,6 +4,7 @@ from abc import ABC, abstractmethod +from botbuilder.core import NullTelemetryClient, BotTelemetryClient from .dialog import Dialog from .dialog_context import DialogContext from .dialog_event import DialogEvent @@ -17,6 +18,31 @@ def __init__(self, dialog_id: str = None): self.dialogs = DialogSet() + @property + def telemetry_client(self) -> BotTelemetryClient: + """ + Gets the telemetry client for logging events. + """ + return self._telemetry_client + + @telemetry_client.setter + def telemetry_client(self, value: BotTelemetryClient) -> None: + """ + Sets the telemetry client for all dialogs in this set. + """ + if value is None: + self._telemetry_client = NullTelemetryClient() + else: + self._telemetry_client = value + + # Care! Dialogs.TelemetryClient assignment internally assigns the + # TelemetryClient for each dialog which could lead to an eventual stack + # overflow in cyclical dialog structures. + # Don't set the telemetry client if the candidate instance is the same as + # the currently set one. + if self.dialogs.telemetry_client != value: + self.dialogs.telemetry_client = self._telemetry_client + @abstractmethod def create_child_context(self, dialog_context: DialogContext) -> DialogContext: raise NotImplementedError() diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_set.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_set.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_set.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_set.py @@ -4,7 +4,13 @@ from hashlib import sha256 from typing import Dict -from botbuilder.core import TurnContext, BotAssert, StatePropertyAccessor +from botbuilder.core import ( + NullTelemetryClient, + BotTelemetryClient, + TurnContext, + BotAssert, + StatePropertyAccessor, +) from .dialog import Dialog from .dialog_state import DialogState @@ -34,11 +40,31 @@ def __init__(self, dialog_state: StatePropertyAccessor = None): del frame self._dialog_state = dialog_state - # self.__telemetry_client = NullBotTelemetryClient.Instance; + self.__telemetry_client = NullTelemetryClient() self._dialogs: Dict[str, Dialog] = {} self._version: str = None + @property + def telemetry_client(self) -> BotTelemetryClient: + """ + Gets the telemetry client for logging events. + """ + return self.__telemetry_client + + @telemetry_client.setter + def telemetry_client(self, value: BotTelemetryClient) -> None: + """ + Sets the telemetry client for all dialogs in this set. + """ + if value is None: + self.__telemetry_client = NullTelemetryClient() + else: + self.__telemetry_client = value + + for dialog in self._dialogs.values(): + dialog.telemetry_client = self.__telemetry_client + def get_version(self) -> str: """ Gets a unique string which represents the combined versions of all dialogs in this this dialogset.
diff --git a/libraries/botbuilder-dialogs/tests/test_dialog_set.py b/libraries/botbuilder-dialogs/tests/test_dialog_set.py --- a/libraries/botbuilder-dialogs/tests/test_dialog_set.py +++ b/libraries/botbuilder-dialogs/tests/test_dialog_set.py @@ -2,8 +2,15 @@ # Licensed under the MIT License. import aiounittest -from botbuilder.dialogs import DialogSet, ComponentDialog -from botbuilder.core import ConversationState, MemoryStorage +from botbuilder.dialogs import DialogSet, ComponentDialog, WaterfallDialog +from botbuilder.core import ConversationState, MemoryStorage, NullTelemetryClient + + +class MyBotTelemetryClient(NullTelemetryClient): + # pylint: disable=useless-return + def __init__(self): + super().__init__() + return class DialogSetTests(aiounittest.AsyncTestCase): @@ -18,3 +25,85 @@ def test_dialogset_constructor_null_property(self): def test_dialogset_constructor_null_from_componentdialog(self): ComponentDialog("MyId") + + def test_dialogset_telemetryset(self): + convo_state = ConversationState(MemoryStorage()) + dialog_state_property = convo_state.create_property("dialogstate") + dialog_set = DialogSet(dialog_state_property) + + dialog_set.add(WaterfallDialog("A")) + dialog_set.add(WaterfallDialog("B")) + + self.assertTrue( + isinstance( + dialog_set.find_dialog("A").telemetry_client, NullTelemetryClient + ) + ) + self.assertTrue( + isinstance( + dialog_set.find_dialog("B").telemetry_client, NullTelemetryClient + ) + ) + + dialog_set.telemetry_client = MyBotTelemetryClient() + + self.assertTrue( + isinstance( + dialog_set.find_dialog("A").telemetry_client, MyBotTelemetryClient + ) + ) + self.assertTrue( + isinstance( + dialog_set.find_dialog("B").telemetry_client, MyBotTelemetryClient + ) + ) + + def test_dialogset_nulltelemetryset(self): + convo_state = ConversationState(MemoryStorage()) + dialog_state_property = convo_state.create_property("dialogstate") + dialog_set = DialogSet(dialog_state_property) + + dialog_set.add(WaterfallDialog("A")) + dialog_set.add(WaterfallDialog("B")) + + dialog_set.telemetry_client = MyBotTelemetryClient() + dialog_set.telemetry_client = None + + self.assertFalse( + isinstance( + dialog_set.find_dialog("A").telemetry_client, MyBotTelemetryClient + ) + ) + self.assertFalse( + isinstance( + dialog_set.find_dialog("B").telemetry_client, MyBotTelemetryClient + ) + ) + self.assertTrue( + isinstance( + dialog_set.find_dialog("A").telemetry_client, NullTelemetryClient + ) + ) + self.assertTrue( + isinstance( + dialog_set.find_dialog("B").telemetry_client, NullTelemetryClient + ) + ) + + # pylint: disable=pointless-string-statement + """ + This test will be enabled when telematry tests are fixed for DialogSet telemetry + def test_dialogset_addtelemetryset(self): + convo_state = ConversationState(MemoryStorage()) + dialog_state_property = convo_state.create_property("dialogstate") + dialog_set = DialogSet(dialog_state_property) + + dialog_set.add(WaterfallDialog("A")) + dialog_set.add(WaterfallDialog("B")) + + dialog_set.telemetry_client = MyBotTelemetryClient() + + dialog_set.add(WaterfallDialog("C")) + + self.assertTrue(isinstance(dialog_set.find_dialog("C").telemetry_client, MyBotTelemetryClient)) + """
[PORT] Move TelemetryClient property to DialogContainer > Port this change from botbuilder-dotnet/master branch: https://github.com/microsoft/botbuilder-dotnet/pull/4178 Fixes #2638 Move TelemetryClient onto DialogContainer (implementation the same in AdaptiveDialog and ComponentDialog). # Changed projects * Microsoft.Bot.Builder.Dialogs.Adaptive * Microsoft.Bot.Builder.Dialogs
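To illustrate the behavior this port introduces, a minimal sketch showing that assigning a telemetry client on a DialogSet (or a DialogContainer such as ComponentDialog) propagates it to every dialog in the set; MyTelemetryClient is a stand-in for any BotTelemetryClient implementation.

```python
# A minimal sketch of the propagation behavior: setting telemetry_client on the
# container pushes the same client down to each child dialog.
from botbuilder.core import ConversationState, MemoryStorage, NullTelemetryClient
from botbuilder.dialogs import DialogSet, WaterfallDialog


class MyTelemetryClient(NullTelemetryClient):
    """Placeholder telemetry client; a real bot would use Application Insights."""


convo_state = ConversationState(MemoryStorage())
dialogs = DialogSet(convo_state.create_property("dialog_state"))
dialogs.add(WaterfallDialog("A"))
dialogs.add(WaterfallDialog("B"))

dialogs.telemetry_client = MyTelemetryClient()

# Both child dialogs now log through the same client.
assert isinstance(dialogs.find_dialog("A").telemetry_client, MyTelemetryClient)
assert isinstance(dialogs.find_dialog("B").telemetry_client, MyTelemetryClient)
```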
2021-02-12T19:44:08
microsoft/botbuilder-python
1,579
microsoft__botbuilder-python-1579
[ "1571" ]
7c4dedec6a11b23ee15617e15e0af9f0961cf46b
diff --git a/libraries/botbuilder-ai/botbuilder/ai/qna/utils/qna_card_builder.py b/libraries/botbuilder-ai/botbuilder/ai/qna/utils/qna_card_builder.py --- a/libraries/botbuilder-ai/botbuilder/ai/qna/utils/qna_card_builder.py +++ b/libraries/botbuilder-ai/botbuilder/ai/qna/utils/qna_card_builder.py @@ -67,13 +67,6 @@ def get_qna_prompts_card(result: QueryResult, card_no_match_text: str) -> Activi for prompt in result.context.prompts ] - # Add No match text - button_list.append( - CardAction( - value=card_no_match_text, type="imBack", title=card_no_match_text, - ) - ) - attachment = CardFactory.hero_card(HeroCard(buttons=button_list)) return Activity(
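For reference, a minimal sketch of how the multi-turn prompts card can be built after this change, with only the follow-up prompt buttons and no appended no-match button. The helper name and the use of MessageFactory are illustrative, and the button construction assumes each QnA prompt exposes display_text as in the SDK models.

```python
# A minimal sketch (illustrative, not the SDK helper itself) building the
# multi-turn prompt card from a QnA Maker QueryResult without the "no match" button.
from botbuilder.ai.qna.models import QueryResult
from botbuilder.core import CardFactory, MessageFactory
from botbuilder.schema import Activity, CardAction, HeroCard


def build_prompts_card(result: QueryResult) -> Activity:
    # One imBack button per follow-up prompt returned by QnA Maker.
    buttons = [
        CardAction(value=prompt.display_text, type="imBack", title=prompt.display_text)
        for prompt in result.context.prompts
    ]
    card = CardFactory.hero_card(HeroCard(buttons=buttons))
    return MessageFactory.attachment(card, text=result.answer)
```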
diff --git a/libraries/botbuilder-ai/tests/qna/test_data/QnAMakerDialog_ActiveLearning.json b/libraries/botbuilder-ai/tests/qna/test_data/QnAMakerDialog_ActiveLearning.json new file mode 100644 --- /dev/null +++ b/libraries/botbuilder-ai/tests/qna/test_data/QnAMakerDialog_ActiveLearning.json @@ -0,0 +1,65 @@ +{ + "answers": [ + { + "questions": [ + "Esper seeks" + ], + "answer": "Esper seeks. She's a curious little explorer. Young toddlers seek out new adventures, expanding their knowledge base. It's their job to test limits, to learn about them. It's the adult's job to enforce the limits, while also allowing room for exploration", + "score": 79.65, + "id": 35, + "source": "Editorial", + "isDocumentText": false, + "metadata": [], + "context": { + "isContextOnly": false, + "prompts": [] + } + }, + { + "questions": [ + "Esper sups" + ], + "answer": "Esper sups. She eats just about anything. She loves her broccoli. Anything that she sees her parents eating, she wants to part take in herself.\n\nCaution though. If she spots you eating dessert, you best be prepared to share with her. Best to wait until she goes down for bed and then sneak your favorite snack in, without her prying eyes.", + "score": 79.65, + "id": 36, + "source": "Editorial", + "isDocumentText": false, + "metadata": [], + "context": { + "isContextOnly": false, + "prompts": [] + } + }, + { + "questions": [ + "Esper screams" + ], + "answer": "Esper screams. The currently 1-year old toddler has a brain that's rapidly developing, expanding to new abilities at an alarming rate. With it may come fright or possibly frustration as they understand what could be done, however they need to master how to do a task themselves", + "score": 66.89, + "id": 34, + "source": "Editorial", + "isDocumentText": false, + "metadata": [], + "context": { + "isContextOnly": false, + "prompts": [] + } + }, + { + "questions": [ + "Esper sleeps" + ], + "answer": "Esper sleeps. Esper sleeps on her floor bed. She never had a crib, as her parents placed her directly on the floor bed since birth. With this comes the benefit of not having to have an awkward transition period from crib to bed, when she gets old enough.\n\nThe idea of using the bed is that it offers the child more freedom to move about--more autonomy. 
Downside is, they will definitely wander off the bed, when they don't want to sleep", + "score": 65.71, + "id": 33, + "source": "Editorial", + "isDocumentText": false, + "metadata": [], + "context": { + "isContextOnly": false, + "prompts": [] + } + } + ], + "activeLearningEnabled": true +} \ No newline at end of file diff --git a/libraries/botbuilder-ai/tests/qna/test_data/QnAMakerDialog_MultiTurn_Answer1.json b/libraries/botbuilder-ai/tests/qna/test_data/QnAMakerDialog_MultiTurn_Answer1.json new file mode 100644 --- /dev/null +++ b/libraries/botbuilder-ai/tests/qna/test_data/QnAMakerDialog_MultiTurn_Answer1.json @@ -0,0 +1,32 @@ +{ + "answers": [ + { + "questions": [ + "Tell me about birds", + "What do you know about birds" + ], + "answer": "Choose one of the following birds to get more info", + "score": 100.0, + "id": 37, + "source": "Editorial", + "isDocumentText": false, + "metadata": [], + "context": { + "isContextOnly": false, + "prompts": [ + { + "displayOrder": 1, + "qnaId": 38, + "displayText": "Bald Eagle" + }, + { + "displayOrder": 2, + "qnaId": 39, + "displayText": "Hummingbird" + } + ] + } + } + ], + "activeLearningEnabled": true +} \ No newline at end of file diff --git a/libraries/botbuilder-ai/tests/qna/test_data/QnAMakerDialog_MultiTurn_Answer2.json b/libraries/botbuilder-ai/tests/qna/test_data/QnAMakerDialog_MultiTurn_Answer2.json new file mode 100644 --- /dev/null +++ b/libraries/botbuilder-ai/tests/qna/test_data/QnAMakerDialog_MultiTurn_Answer2.json @@ -0,0 +1,20 @@ +{ + "answers": [ + { + "questions": [ + "Bald Eagle" + ], + "answer": "Apparently these guys aren't actually bald!", + "score": 100.0, + "id": 38, + "source": "Editorial", + "isDocumentText": false, + "metadata": [], + "context": { + "isContextOnly": true, + "prompts": [] + } + } + ], + "activeLearningEnabled": true +} \ No newline at end of file diff --git a/libraries/botbuilder-ai/tests/qna/test_qna_dialog.py b/libraries/botbuilder-ai/tests/qna/test_qna_dialog.py new file mode 100644 --- /dev/null +++ b/libraries/botbuilder-ai/tests/qna/test_qna_dialog.py @@ -0,0 +1,165 @@ +import json +from os import path +from unittest.mock import patch +import aiounittest + +# from botbuilder.ai.qna import QnAMakerEndpoint, QnAMaker, QnAMakerOptions +from botbuilder.ai.qna.dialogs import QnAMakerDialog +from botbuilder.schema import Activity, ActivityTypes +from botbuilder.core import ConversationState, MemoryStorage, TurnContext +from botbuilder.core.adapters import TestAdapter, TestFlow +from botbuilder.dialogs import DialogSet, DialogTurnStatus + + +class QnaMakerDialogTest(aiounittest.AsyncTestCase): + # Note this is NOT a real QnA Maker application ID nor a real QnA Maker subscription-key + # theses are GUIDs edited to look right to the parsing and validation code. + + _knowledge_base_id: str = "f028d9k3-7g9z-11d3-d300-2b8x98227q8w" + _endpoint_key: str = "1k997n7w-207z-36p3-j2u1-09tas20ci6011" + _host: str = "https://dummyqnahost.azurewebsites.net/qnamaker" + + _tell_me_about_birds: str = "Tell me about birds" + _choose_bird: str = "Choose one of the following birds to get more info" + _bald_eagle: str = "Bald Eagle" + _esper: str = "Esper" + + DEFAULT_ACTIVE_LEARNING_TITLE: str = "Did you mean:" + DEFAULT_NO_MATCH_TEXT: str = "None of the above." + DEFAULT_CARD_NO_MATCH_RESPONSE: str = "Thanks for the feedback." 
+ + async def test_multiturn_dialog(self): + # Set Up QnAMakerDialog + convo_state = ConversationState(MemoryStorage()) + dialog_state = convo_state.create_property("dialogState") + dialogs = DialogSet(dialog_state) + + qna_dialog = QnAMakerDialog( + self._knowledge_base_id, self._endpoint_key, self._host + ) + dialogs.add(qna_dialog) + + # Callback that runs the dialog + async def execute_qna_dialog(turn_context: TurnContext) -> None: + if turn_context.activity.type != ActivityTypes.message: + raise TypeError( + "Failed to execute QnA dialog. Should have received a message activity." + ) + + response_json = self._get_json_res(turn_context.activity.text) + dialog_context = await dialogs.create_context(turn_context) + with patch( + "aiohttp.ClientSession.post", + return_value=aiounittest.futurized(response_json), + ): + results = await dialog_context.continue_dialog() + + if results.status == DialogTurnStatus.Empty: + await dialog_context.begin_dialog("QnAMakerDialog") + + await convo_state.save_changes(turn_context) + + # Send and receive messages from QnA dialog + test_adapter = TestAdapter(execute_qna_dialog) + test_flow = TestFlow(None, test_adapter) + tf2 = await test_flow.send(self._tell_me_about_birds) + dialog_reply: Activity = tf2.adapter.activity_buffer[0] + self._assert_has_valid_hero_card_buttons(dialog_reply, button_count=2) + tf3 = await tf2.assert_reply(self._choose_bird) + tf4 = await tf3.send(self._bald_eagle) + await tf4.assert_reply("Apparently these guys aren't actually bald!") + + async def test_active_learning(self): + # Set Up QnAMakerDialog + convo_state = ConversationState(MemoryStorage()) + dialog_state = convo_state.create_property("dialogState") + dialogs = DialogSet(dialog_state) + + qna_dialog = QnAMakerDialog( + self._knowledge_base_id, self._endpoint_key, self._host + ) + dialogs.add(qna_dialog) + + # Callback that runs the dialog + async def execute_qna_dialog(turn_context: TurnContext) -> None: + if turn_context.activity.type != ActivityTypes.message: + raise TypeError( + "Failed to execute QnA dialog. Should have received a message activity." 
+ ) + + response_json = self._get_json_res(turn_context.activity.text) + dialog_context = await dialogs.create_context(turn_context) + with patch( + "aiohttp.ClientSession.post", + return_value=aiounittest.futurized(response_json), + ): + results = await dialog_context.continue_dialog() + + if results.status == DialogTurnStatus.Empty: + await dialog_context.begin_dialog("QnAMakerDialog") + + await convo_state.save_changes(turn_context) + + # Send and receive messages from QnA dialog + test_adapter = TestAdapter(execute_qna_dialog) + test_flow = TestFlow(None, test_adapter) + tf2 = await test_flow.send(self._esper) + dialog_reply: Activity = tf2.adapter.activity_buffer[0] + self._assert_has_valid_hero_card_buttons(dialog_reply, button_count=3) + tf3 = await tf2.assert_reply(self.DEFAULT_ACTIVE_LEARNING_TITLE) + tf4 = await tf3.send(self.DEFAULT_NO_MATCH_TEXT) + await tf4.assert_reply(self.DEFAULT_CARD_NO_MATCH_RESPONSE) + + print(tf2) + + def _assert_has_valid_hero_card_buttons( + self, activity: Activity, button_count: int + ): + self.assertIsInstance(activity, Activity) + attachments = activity.attachments + self.assertTrue(attachments) + self.assertEqual(len(attachments), 1) + buttons = attachments[0].content.buttons + button_count_err = ( + f"Should have only received {button_count} buttons in multi-turn prompt" + ) + + if activity.text == self._choose_bird: + self.assertEqual(len(buttons), button_count, button_count_err) + self.assertEqual(buttons[0].value, self._bald_eagle) + self.assertEqual(buttons[1].value, "Hummingbird") + + if activity.text == self.DEFAULT_ACTIVE_LEARNING_TITLE: + self.assertEqual(len(buttons), button_count, button_count_err) + self.assertEqual(buttons[0].value, "Esper seeks") + self.assertEqual(buttons[1].value, "Esper sups") + self.assertEqual(buttons[2].value, self.DEFAULT_NO_MATCH_TEXT) + + def _get_json_res(self, text: str) -> object: + if text == self._tell_me_about_birds: + return QnaMakerDialogTest._get_json_for_file( + "QnAMakerDialog_MultiTurn_Answer1.json" + ) + + if text == self._bald_eagle: + return QnaMakerDialogTest._get_json_for_file( + "QnAMakerDialog_MultiTurn_Answer2.json" + ) + + if text == self._esper: + return QnaMakerDialogTest._get_json_for_file( + "QnAMakerDialog_ActiveLearning.json" + ) + + return None + + @staticmethod + def _get_json_for_file(response_file: str) -> object: + curr_dir = path.dirname(path.abspath(__file__)) + response_path = path.join(curr_dir, "test_data", response_file) + + with open(response_path, "r", encoding="utf-8-sig") as file: + response_str = file.read() + response_json = json.loads(response_str) + + return response_json
"no match response" button present in qna maker dialog when active learning is disabled Python tracking issue for repo code-owners See original issue for details: microsoft/botframework-sdk#6146
2021-03-19T18:50:57
microsoft/botbuilder-python
1,632
microsoft__botbuilder-python-1632
[ "1631" ]
95670788a1fd9778786ec19b4cdc1590b213e0d5
diff --git a/libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py b/libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py --- a/libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py +++ b/libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py @@ -36,6 +36,7 @@ TokenStatus, TokenExchangeRequest, SignInUrlResponse, + TokenResponse as ConnectorTokenResponse, ) from botbuilder.schema import ( Activity, @@ -509,7 +510,10 @@ async def process_activity_with_identity( ) if invoke_response is None: return InvokeResponse(status=int(HTTPStatus.NOT_IMPLEMENTED)) - return invoke_response.value + return InvokeResponse( + status=invoke_response.value.status, + body=invoke_response.value.body.serialize(), + ) return None @@ -1267,8 +1271,13 @@ async def exchange_token_from_credentials( exchange_request.token, ) - if isinstance(result, TokenResponse): - return result + if isinstance(result, ConnectorTokenResponse): + return TokenResponse( + channel_id=result.channel_id, + connection_name=result.connection_name, + token=result.token, + expiration=result.expiration, + ) raise TypeError(f"exchange_async returned improper result: {type(result)}") @staticmethod
diff --git a/libraries/botbuilder-core/tests/test_bot_framework_adapter.py b/libraries/botbuilder-core/tests/test_bot_framework_adapter.py --- a/libraries/botbuilder-core/tests/test_bot_framework_adapter.py +++ b/libraries/botbuilder-core/tests/test_bot_framework_adapter.py @@ -12,6 +12,7 @@ BotFrameworkAdapterSettings, TurnContext, ) +from botbuilder.core.invoke_response import InvokeResponse from botbuilder.schema import ( Activity, ActivityTypes, @@ -22,6 +23,13 @@ DeliveryModes, ExpectedReplies, CallerIdConstants, + SignInConstants, + TokenExchangeInvokeRequest, + TokenExchangeInvokeResponse, +) +from botframework.connector.token_api.models import ( + TokenExchangeRequest, + TokenResponse as ConnectorTokenResponse, ) from botframework.connector.aio import ConnectorClient from botframework.connector.auth import ( @@ -189,6 +197,31 @@ async def mock_create_conversation(parameters): return self.connector_client_mock + async def _create_token_api_client( + self, context: TurnContext, oauth_app_credentials: AppCredentials = None + ): + client = await super()._create_token_api_client(context, oauth_app_credentials) + + def mock_exchange_async( + user_id, # pylint: disable=unused-argument + connection_name, + channel_id, + uri=None, # pylint: disable=unused-argument + token=None, + custom_headers=None, # pylint: disable=unused-argument + raw=False, # pylint: disable=unused-argument + **operation_config, # pylint: disable=unused-argument + ): + return ConnectorTokenResponse( + channel_id=channel_id, + connection_name=connection_name, + token=token, + expiration=None, + ) + + client.user_token.exchange_async = mock_exchange_async + return client + async def process_activity( channel_id: str, channel_data_tenant_id: str, conversation_tenant_id: str @@ -731,3 +764,124 @@ async def callback(context: TurnContext): adapter.connector_client_mock.conversations.send_to_conversation.call_count == 3 ) + + async def test_process_activity_with_identity_token_exchange_invoke_response(self): + mock_credential_provider = unittest.mock.create_autospec(CredentialProvider) + + settings = BotFrameworkAdapterSettings( + app_id="bot_id", credential_provider=mock_credential_provider, + ) + adapter = AdapterUnderTest(settings) + + identity = ClaimsIdentity( + claims={ + AuthenticationConstants.AUDIENCE_CLAIM: "bot_id", + AuthenticationConstants.APP_ID_CLAIM: "bot_id", + AuthenticationConstants.VERSION_CLAIM: "1.0", + }, + is_authenticated=True, + ) + + inbound_activity = Activity( + type=ActivityTypes.invoke, + name=SignInConstants.token_exchange_operation_name, + service_url="http://tempuri.org/whatever", + delivery_mode=DeliveryModes.normal, + conversation=ConversationAccount(id="conversationId"), + value=TokenExchangeInvokeRequest( + id="token_exchange_id", + token="token", + connection_name="connection_name", + ), + ) + + async def callback(context: TurnContext): + activity = Activity( + type=ActivityTypes.invoke_response, + value=InvokeResponse( + status=200, + body=TokenExchangeInvokeResponse( + id=context.activity.value.id, + connection_name=context.activity.value.connection_name, + ), + ), + ) + + await context.send_activity(activity) + + invoke_response = await adapter.process_activity_with_identity( + inbound_activity, identity, callback, + ) + + assert invoke_response + assert invoke_response.status == 200 + assert invoke_response.body["id"] == inbound_activity.value.id + assert ( + invoke_response.body["connectionName"] + == inbound_activity.value.connection_name + ) + + async def 
test_exchange_token_from_credentials(self): + mock_credential_provider = unittest.mock.create_autospec(CredentialProvider) + + settings = BotFrameworkAdapterSettings( + app_id="bot_id", credential_provider=mock_credential_provider, + ) + adapter = AdapterUnderTest(settings) + + identity = ClaimsIdentity( + claims={ + AuthenticationConstants.AUDIENCE_CLAIM: "bot_id", + AuthenticationConstants.APP_ID_CLAIM: "bot_id", + AuthenticationConstants.VERSION_CLAIM: "1.0", + }, + is_authenticated=True, + ) + + inbound_activity = Activity( + type=ActivityTypes.invoke, + name=SignInConstants.token_exchange_operation_name, + service_url="http://tempuri.org/whatever", + conversation=ConversationAccount(id="conversationId"), + value=TokenExchangeInvokeRequest( + id="token_exchange_id", + token="token", + connection_name="connection_name", + ), + ) + + async def callback(context): + result = await adapter.exchange_token_from_credentials( + turn_context=context, + oauth_app_credentials=None, + connection_name=context.activity.value.connection_name, + exchange_request=TokenExchangeRequest( + token=context.activity.value.token, uri=context.activity.service_url + ), + user_id="user_id", + ) + + activity = Activity( + type=ActivityTypes.invoke_response, + value=InvokeResponse( + status=200, + body=TokenExchangeInvokeResponse( + id=context.activity.value.id, + connection_name=result.connection_name, + ), + ), + ) + + await context.send_activity(activity) + + invoke_response = await adapter.process_activity_with_identity( + inbound_activity, identity, callback, + ) + + assert invoke_response + assert invoke_response.status == 200 + assert invoke_response.body["id"] == inbound_activity.value.id + assert ( + invoke_response.body["connectionName"] + == inbound_activity.value.connection_name + )
[BotFrameworkAdapter] Process_activity returns HTTP 412 error when exchanging a token ## Version 4.12.0 ## Describe the bug When performing a token exchange operation between a Host and a Skill, the Host intercepts the oAuthCard sent from the Skill and sends back the Activity with the TokenExchangeInvokeRequest value, returning a response with **412** status and a body of `The bot is unable to exchange token. Proceed with regular login.`. This issue occurs in the `BotFrameworkAdapter.exchange_token_from_credentials` method: the `UserTokenOperation.exchange_async` method from `BotFramework.Connector` returns a `BotFramework.Connector.TokenResponse`, which is then compared against `BotBuilder.Schema.TokenResponse` using `isinstance`; the types differ, so the error is raised ([Line#1270](https://github.com/microsoft/botbuilder-python/blob/main/libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py#L1270)). Another behavior discovered as part of this issue: when `Adapter.process_activity` returns the `InvokeResponse` ([Line#512](https://github.com/microsoft/botbuilder-python/blob/main/libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py#L512)), the body (`TokenExchangeInvokeResponse`) is not serialized, leaving the user to do it in the bot. ## To Reproduce Steps to reproduce the behavior: 1. Use the [DotNet Waterfall Host](https://github.com/microsoft/BotFramework-FunctionalTests/tree/main/Bots/DotNet/Consumers/CodeFirst/WaterfallHostBot) and the [Python Waterfall Skill](https://github.com/microsoft/BotFramework-FunctionalTests/tree/main/Bots/Python/Skills/CodeFirst/WaterfallSkillBot) bot samples from the BotFramework-FunctionalTests repo. 2. Configure the App Registrations required to test Sso. [link](https://docs.microsoft.com/en-us/azure/bot-service/bot-builder-authentication-sso?view=azure-bot-service-4.0&tabs=csharp%2Ceml). 3. Use WebChat or Emulator for the host. 4. Select `normal` delivery mode. 5. Select `Waterfall` group. 6. Select `WaterfallSkillBotPython` skill. 7. Select the `Sso` option. 8. Log in to the Host. 9. Call the Skill with Sso. 10. Select Login in the skill. 11. It will not show the token and will ask to sign in. 12. The ProcessActivityAsync executed in [SendTokenExchangeInvokeToSkillAsync](https://github.com/microsoft/BotFramework-FunctionalTests/blob/main/Bots/DotNet/Consumers/CodeFirst/WaterfallHostBot/TokenExchangeSkillHandler.cs#L172) will return a response with **412** status. ## Expected behavior - Resolve the correct `TokenResponse` instance by comparing the `exchange_async` method result against `BotFramework.Connector.TokenResponse` and not `BotBuilder.Schema.TokenResponse`. - Serialize the `InvokeResponse` body when the `process_activity` method gets executed. ## Screenshots Host and Skill Sso communication failure. ![image](https://user-images.githubusercontent.com/62260472/113896016-ec995c80-979f-11eb-8770-8a832b128444.png) ![image](https://user-images.githubusercontent.com/62260472/113913432-b5807680-97b2-11eb-9d90-479ae8019c24.png) InvokeResponse body isn't serialized. ![image](https://user-images.githubusercontent.com/62260472/113913395-abf70e80-97b2-11eb-90c6-8da5af578c3c.png)
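To summarize the type mismatch described above, a small sketch (adapted from the patch, shown standalone; the function name is illustrative) of the mapping the adapter now performs from the connector-layer token response to the botbuilder-schema one.

```python
# A minimal sketch of the type mapping the fix introduces: the connector client
# returns botframework.connector.token_api.models.TokenResponse, which is
# converted into the botbuilder.schema TokenResponse expected by callers.
from botbuilder.schema import TokenResponse
from botframework.connector.token_api.models import (
    TokenResponse as ConnectorTokenResponse,
)


def to_builder_token_response(result: ConnectorTokenResponse) -> TokenResponse:
    return TokenResponse(
        channel_id=result.channel_id,
        connection_name=result.connection_name,
        token=result.token,
        expiration=result.expiration,
    )
```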
2021-04-08T20:55:44
microsoft/botbuilder-python
1,637
microsoft__botbuilder-python-1637
[ "1627" ]
c85bafaacda5aa44d61fafa39927db74047fc060
diff --git a/libraries/botbuilder-testing/setup.py b/libraries/botbuilder-testing/setup.py --- a/libraries/botbuilder-testing/setup.py +++ b/libraries/botbuilder-testing/setup.py @@ -8,6 +8,8 @@ "botbuilder-schema==4.13.0", "botbuilder-core==4.13.0", "botbuilder-dialogs==4.13.0", + "botbuilder-azure==4.13.0", + "pytest~=6.2.3", ] TESTS_REQUIRES = ["aiounittest==1.3.0"]
botbuilder-testing is missing install requirements ## Version botbuilder-testing 4.12.0 ## Describe the bug While installing botbuilder-testing for CI I got errors about missing dependencies. ## To Reproduce 1. `python3 -m venv .venv` 2. `. .venv/bin/activate` 3. `pip install -U pip wheel` 4. `pip install botbuilder-testing` 5. `python -c "from botbuilder.testing import DialogTestClient"` First error is missing `pytest`: ```python Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/__init__.py", line 6, in <module> from .storage_base_tests import StorageBaseTests File "/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/storage_base_tests.py", line 26, in <module> import pytest ModuleNotFoundError: No module named 'pytest' ``` 6. `pip install pytest` 7. `python -c 'from botbuilder.testing import DialogTestClient'` Next error is missing `botbuilder-azure`: ```python Traceback (most recent call last): File "<string>", line 1, in <module> File "/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/__init__.py", line 6, in <module> from .storage_base_tests import StorageBaseTests File "/home/calum/sureswift/jell/jell-bot-teams-v2/.venv-test/lib/python3.8/site-packages/botbuilder/testing/storage_base_tests.py", line 27, in <module> from botbuilder.azure import CosmosDbStorage ModuleNotFoundError: No module named 'botbuilder.azure' ``` 8. `pip install botbuilder-azure` 9. `python -c 'from botbuilder.testing import DialogTestClient'` Command works! ## Expected behavior No errors after installing botbuilder-testing and importing the module. I do wonder whether the pytest requirement is necessary at all; removing it would keep the library test-suite agnostic. Could it be refactored out?
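Once the missing packages are declared as install requirements (as the setup.py change above does), a quick smoke check along these lines should pass; the script is only illustrative and exercises imports, nothing else.

```python
# A minimal smoke check (not part of the package) confirming that
# botbuilder-testing imports cleanly once pytest and botbuilder-azure are present.
from botbuilder.testing import DialogTestClient, StorageBaseTests  # noqa: F401

print("botbuilder.testing imports resolved")
```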
@axelsrz can you take a pass at this?
2021-04-13T00:21:33
microsoft/botbuilder-python
1,671
microsoft__botbuilder-python-1671
[ "1670" ]
85ec3769021c233c40df72dc86387d9739329986
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/attachment_prompt.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/attachment_prompt.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/attachment_prompt.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/attachment_prompt.py @@ -3,7 +3,7 @@ from typing import Callable, Dict -from botbuilder.schema import ActivityTypes, InputHints +from botbuilder.schema import ActivityTypes from botbuilder.core import TurnContext from .prompt import Prompt, PromptValidatorContext @@ -39,10 +39,8 @@ async def on_prompt( ) if is_retry and options.retry_prompt: - options.retry_prompt.input_hint = InputHints.expecting_input await turn_context.send_activity(options.retry_prompt) elif options.prompt: - options.prompt.input_hint = InputHints.expecting_input await turn_context.send_activity(options.prompt) async def on_recognize(
diff --git a/libraries/botbuilder-dialogs/tests/test_attachment_prompt.py b/libraries/botbuilder-dialogs/tests/test_attachment_prompt.py --- a/libraries/botbuilder-dialogs/tests/test_attachment_prompt.py +++ b/libraries/botbuilder-dialogs/tests/test_attachment_prompt.py @@ -1,13 +1,14 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. +import copy import aiounittest from botbuilder.dialogs.prompts import ( AttachmentPrompt, PromptOptions, PromptValidatorContext, ) -from botbuilder.schema import Activity, ActivityTypes, Attachment +from botbuilder.schema import Activity, ActivityTypes, Attachment, InputHints from botbuilder.core import ( TurnContext, @@ -71,6 +72,42 @@ async def exec_test(turn_context: TurnContext): step3 = await step2.send(attachment_activity) await step3.assert_reply("some content") + async def test_attachment_prompt_with_input_hint(self): + prompt_activity = Activity( + type=ActivityTypes.message, + text="please add an attachment.", + input_hint=InputHints.accepting_input, + ) + + async def exec_test(turn_context: TurnContext): + dialog_context = await dialogs.create_context(turn_context) + + results = await dialog_context.continue_dialog() + + if results.status == DialogTurnStatus.Empty: + options = PromptOptions(prompt=copy.copy(prompt_activity)) + await dialog_context.prompt("AttachmentPrompt", options) + elif results.status == DialogTurnStatus.Complete: + attachment = results.result[0] + content = MessageFactory.text(attachment.content) + await turn_context.send_activity(content) + + await convo_state.save_changes(turn_context) + + # Initialize TestAdapter. + adapter = TestAdapter(exec_test) + + # Create ConversationState with MemoryStorage and register the state as middleware. + convo_state = ConversationState(MemoryStorage()) + + # Create a DialogState property, DialogSet and AttachmentPrompt. + dialog_state = convo_state.create_property("dialog_state") + dialogs = DialogSet(dialog_state) + dialogs.add(AttachmentPrompt("AttachmentPrompt")) + + step1 = await adapter.send("hello") + await step1.assert_reply(prompt_activity) + async def test_attachment_prompt_with_validator(self): async def exec_test(turn_context: TurnContext): dialog_context = await dialogs.create_context(turn_context)
[Dialogs] InputHint is hardcoded for AttachmentPrompt as expecting_input ## Version 4.12.0 ## Describe the bug In the [AttachmentPrompt](https://github.com/microsoft/botbuilder-python/blame/main/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/attachment_prompt.py) class, the **prompt.input_hint** is set as [expecting_input](https://github.com/microsoft/botbuilder-python/blame/main/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/attachment_prompt.py#L45) replacing any value the user could set. This behavior is not present in either .NET or JS SDKs. ## To Reproduce Steps to reproduce the behavior: 1. Using the sample: [05.multi-turn-prompt](https://github.com/microsoft/BotBuilder-Samples/tree/main/samples/python/05.multi-turn-prompt): - Open [dialogs/user_profile_dialog.py](https://github.com/microsoft/BotBuilder-Samples/blob/main/samples/python/05.multi-turn-prompt/dialogs/user_profile_dialog.py) file. - Add the following line to the imports section `from botbuilder.schema import InputHints`. - Update picture_step method adding `input_hint=InputHints.accepting_input` to the message as can be seen below: ![image](https://user-images.githubusercontent.com/44245136/115744805-45a0ed00-a369-11eb-9eb1-6d86e9d73a26.png) - Save the changes. - Open the console in the bots folder and run the following commands: - python -m pip install -r requirements.txt - python app.py 2. Open BotFramework Emulator and connect to your bot. 3. Follow the dialog flow until the profile picture step. 4. See the activity in the Inspection panel and check the value of the inputHint property. ![image](https://user-images.githubusercontent.com/44245136/115744882-56516300-a369-11eb-837b-833dcce1bec7.png) ## Expected behavior The bot should send the activity with the inputHint property that was set for the message in the prompt options.
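With the fix, the input hint set on the prompt activity is preserved; a minimal sketch of a waterfall step in the spirit of the repro above (the dialog id, prompt text, and step name follow the 05.multi-turn-prompt sample and are assumptions here, and the AttachmentPrompt is assumed to be registered under that id).

```python
# A minimal sketch of a prompt step whose input hint survives the AttachmentPrompt
# now that the prompt no longer overwrites it with expecting_input.
from botbuilder.core import MessageFactory
from botbuilder.dialogs import DialogTurnResult, WaterfallStepContext
from botbuilder.dialogs.prompts import PromptOptions
from botbuilder.schema import InputHints


async def picture_step(step_context: WaterfallStepContext) -> DialogTurnResult:
    prompt = MessageFactory.text(
        "Please attach a profile picture (or type any message to skip).",
        input_hint=InputHints.accepting_input,
    )
    # After the fix, the prompt activity is sent with accepting_input intact.
    return await step_context.prompt("AttachmentPrompt", PromptOptions(prompt=prompt))
```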
2021-05-07T18:53:38
microsoft/botbuilder-python
1,675
microsoft__botbuilder-python-1675
[ "1674" ]
9551c21ab14cc321465cbc5ad842eb7837b056d0
diff --git a/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py b/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py --- a/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py +++ b/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py @@ -777,9 +777,7 @@ async def on_teams_members_added( # pylint: disable=unused-argument ChannelAccount().deserialize(member.serialize()) for member in teams_members_added ] - return await super().on_members_added_activity( - teams_members_added, turn_context - ) + return await self.on_members_added_activity(teams_members_added, turn_context) async def on_teams_members_removed_dispatch( # pylint: disable=unused-argument self, @@ -833,7 +831,7 @@ async def on_teams_members_removed( # pylint: disable=unused-argument ChannelAccount().deserialize(member.serialize()) for member in teams_members_removed ] - return await super().on_members_removed_activity(members_removed, turn_context) + return await self.on_members_removed_activity(members_removed, turn_context) async def on_teams_channel_deleted( # pylint: disable=unused-argument self, channel_info: ChannelInfo, team_info: TeamInfo, turn_context: TurnContext
Difficult overriding behavior for members added/removed in TeamsActivityHandler ## Version 4.13 ## Describe the bug The [on_teams_members_added](https://github.com/microsoft/botbuilder-python/blob/4.12/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py#L759) and [on_teams_members_removed](https://github.com/microsoft/botbuilder-python/blob/4.12/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py#L815) methods of TeamsActivityHandler are difficult to override because they call the `super()` implementation instead of `self`. ## To Reproduce Implement a TeamsActivityHandler subclass with custom behavior in either of those methods. ## Expected behavior These methods call the implementation on `self`. ## Additional context This bug was suggested in the discussion of #1662
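To illustrate why the dispatch needs to call self rather than super, a minimal sketch of a subclass whose override is only reached after this change; the class name and reply text are illustrative.

```python
# A minimal sketch (illustrative subclass, not from the SDK) showing the override
# that is only reached when on_teams_members_added delegates to
# self.on_members_added_activity instead of super().
from typing import List

from botbuilder.core import TurnContext
from botbuilder.core.teams import TeamsActivityHandler
from botbuilder.schema import ChannelAccount


class WelcomeBot(TeamsActivityHandler):
    async def on_members_added_activity(
        self, members_added: List[ChannelAccount], turn_context: TurnContext
    ) -> None:
        for member in members_added:
            # Skip the bot itself; greet every newly added user.
            if member.id != turn_context.activity.recipient.id:
                await turn_context.send_activity("Welcome to the team!")
```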
2021-05-13T18:40:49
microsoft/botbuilder-python
1,682
microsoft__botbuilder-python-1682
[ "1700" ]
c7cef569ff6e7ccd1a19def051ddad1db3334f6c
diff --git a/libraries/botframework-streaming/botframework/streaming/receive_request.py b/libraries/botframework-streaming/botframework/streaming/receive_request.py --- a/libraries/botframework-streaming/botframework/streaming/receive_request.py +++ b/libraries/botframework-streaming/botframework/streaming/receive_request.py @@ -8,7 +8,7 @@ class ReceiveRequest: def __init__( - self, *, verb: str = None, path: str = None, streams: List[ContentStream] + self, *, verb: str = None, path: str = None, streams: List[ContentStream] = None ): self.verb = verb self.path = path diff --git a/libraries/botframework-streaming/botframework/streaming/receive_response.py b/libraries/botframework-streaming/botframework/streaming/receive_response.py --- a/libraries/botframework-streaming/botframework/streaming/receive_response.py +++ b/libraries/botframework-streaming/botframework/streaming/receive_response.py @@ -9,9 +9,9 @@ class ReceiveResponse: - def __init__(self, status_code: int = None, streams: List[ContentStream] = None): + def __init__(self, status_code: int = 0, streams: List[ContentStream] = None): self.status_code = status_code - self.streams = streams + self.streams = streams or [] def read_body_as_json( self, cls: Union[Type[Model], Type[Serializable]] diff --git a/libraries/botframework-streaming/botframework/streaming/streaming_response.py b/libraries/botframework-streaming/botframework/streaming/streaming_response.py --- a/libraries/botframework-streaming/botframework/streaming/streaming_response.py +++ b/libraries/botframework-streaming/botframework/streaming/streaming_response.py @@ -2,6 +2,7 @@ # Licensed under the MIT License. import json +from http import HTTPStatus from uuid import UUID, uuid4 from typing import List, Union @@ -12,7 +13,7 @@ class StreamingResponse: def __init__( - self, *, status_code: int = None, streams: List[ResponseMessageStream] = None + self, *, status_code: int = 0, streams: List[ResponseMessageStream] = None ): self.status_code = status_code self.streams = streams @@ -48,3 +49,20 @@ def create_response(status_code: int, body: object) -> "StreamingResponse": response.add_stream(body) return response + + @staticmethod + def not_found(body: object = None) -> "StreamingResponse": + return StreamingResponse.create_response(HTTPStatus.NOT_FOUND, body) + + @staticmethod + def forbidden(body: object = None) -> "StreamingResponse": + return StreamingResponse.create_response(HTTPStatus.FORBIDDEN, body) + + # pylint: disable=invalid-name + @staticmethod + def ok(body: object = None) -> "StreamingResponse": + return StreamingResponse.create_response(HTTPStatus.OK, body) + + @staticmethod + def internal_server_error(body: object = None) -> "StreamingResponse": + return StreamingResponse.create_response(HTTPStatus.INTERNAL_SERVER_ERROR, body)
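A short usage sketch (not from the PR) of the convenience helpers added above; it assumes botframework-streaming is importable.

```python
# A minimal sketch exercising the new StreamingResponse helpers.
from botframework.streaming import StreamingResponse

ok_response = StreamingResponse.ok(body="processed")  # 200 with one body stream
missing = StreamingResponse.not_found()               # 404, no streams attached

assert ok_response.status_code == 200
assert len(ok_response.streams) == 1
assert missing.streams is None
```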
diff --git a/libraries/botframework-streaming/tests/test_content_stream.py b/libraries/botframework-streaming/tests/test_content_stream.py new file mode 100644 --- /dev/null +++ b/libraries/botframework-streaming/tests/test_content_stream.py @@ -0,0 +1,34 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from uuid import uuid4 + +import aiounittest + +from botframework.streaming.payloads import ContentStream +from botframework.streaming.payloads.assemblers import PayloadStreamAssembler + + +class TestResponses(aiounittest.AsyncTestCase): + async def test_content_stream_ctor_none_assembler_throws(self): + with self.assertRaises(TypeError): + ContentStream(uuid4(), None) + + async def test_content_stream_id(self): + test_id = uuid4() + test_assembler = PayloadStreamAssembler(None, test_id) + sut = ContentStream(test_id, test_assembler) + + self.assertEqual(test_id, sut.identifier) + + async def test_content_stream_type(self): + test_id = uuid4() + test_assembler = PayloadStreamAssembler(None, test_id) + sut = ContentStream(test_id, test_assembler) + test_type = "foo/bar" + + sut.content_type = test_type + + self.assertEqual(test_type, sut.content_type) + + sut.cancel() diff --git a/libraries/botframework-streaming/tests/test_requests.py b/libraries/botframework-streaming/tests/test_requests.py new file mode 100644 --- /dev/null +++ b/libraries/botframework-streaming/tests/test_requests.py @@ -0,0 +1,129 @@ +import json + +import aiounittest + +from botbuilder.schema import Activity +from botframework.streaming import ReceiveRequest, StreamingRequest +from botframework.streaming.payloads import ResponseMessageStream + + +class TestRequests(aiounittest.AsyncTestCase): + async def test_receive_request_empty_streams(self): + sut = ReceiveRequest() + + self.assertIsNotNone(sut.streams) + self.assertEqual(0, len(sut.streams)) + + async def test_receive_request_null_properties(self): + sut = ReceiveRequest() + + self.assertIsNone(sut.verb) + self.assertIsNone(sut.path) + + async def test_streaming_request_null_properties(self): + sut = StreamingRequest() + + self.assertIsNone(sut.verb) + self.assertIsNone(sut.path) + + async def test_streaming_request_add_stream_null_throws(self): + sut = StreamingRequest() + + with self.assertRaises(TypeError): + sut.add_stream(None) + + async def test_streaming_request_add_stream_success(self): + sut = StreamingRequest() + content = "hi" + + sut.add_stream(content) + + self.assertIsNotNone(sut.streams) + self.assertEqual(1, len(sut.streams)) + self.assertEqual(content, sut.streams[0].content) + + async def test_streaming_request_add_stream_existing_list_success(self): + sut = StreamingRequest() + content = "hi" + content_2 = "hello" + + sut.streams = [ResponseMessageStream(content=content_2)] + + sut.add_stream(content) + + self.assertIsNotNone(sut.streams) + self.assertEqual(2, len(sut.streams)) + self.assertEqual(content_2, sut.streams[0].content) + self.assertEqual(content, sut.streams[1].content) + + async def test_streaming_request_create_get_success(self): + sut = StreamingRequest.create_get() + + self.assertEqual(StreamingRequest.GET, sut.verb) + self.assertIsNone(sut.path) + self.assertIsNone(sut.streams) + + async def test_streaming_request_create_post_success(self): + sut = StreamingRequest.create_post() + + self.assertEqual(StreamingRequest.POST, sut.verb) + self.assertIsNone(sut.path) + self.assertIsNone(sut.streams) + + async def test_streaming_request_create_delete_success(self): + sut = 
StreamingRequest.create_delete() + + self.assertEqual(StreamingRequest.DELETE, sut.verb) + self.assertIsNone(sut.path) + self.assertIsNone(sut.streams) + + async def test_streaming_request_create_put_success(self): + sut = StreamingRequest.create_put() + + self.assertEqual(StreamingRequest.PUT, sut.verb) + self.assertIsNone(sut.path) + self.assertIsNone(sut.streams) + + async def test_streaming_request_create_with_body_success(self): + content = "hi" + sut = StreamingRequest.create_request(StreamingRequest.POST, "123", content) + + self.assertEqual(StreamingRequest.POST, sut.verb) + self.assertEqual("123", sut.path) + self.assertIsNotNone(sut.streams) + self.assertEqual(1, len(sut.streams)) + self.assertEqual(content, sut.streams[0].content) + + async def test_streaming_request_set_body_string_success(self): + sut = StreamingRequest() + + sut.set_body("123") + + self.assertIsNotNone(sut.streams) + self.assertEqual(1, len(sut.streams)) + self.assertIsInstance(sut.streams[0].content, list) + self.assertIsInstance(sut.streams[0].content[0], int) + self.assertEqual("123", bytes(sut.streams[0].content).decode("utf-8-sig")) + + async def test_streaming_request_set_body_none_does_not_throw(self): + sut = StreamingRequest() + + sut.set_body(None) + + async def test_streaming_request_set_body_success(self): + sut = StreamingRequest() + activity = Activity(text="hi", type="message") + + sut.set_body(activity) + + self.assertIsNotNone(sut.streams) + self.assertEqual(1, len(sut.streams)) + self.assertIsInstance(sut.streams[0].content, list) + self.assertIsInstance(sut.streams[0].content[0], int) + + assert_activity = Activity.deserialize( + json.loads(bytes(sut.streams[0].content).decode("utf-8-sig")) + ) + + self.assertEqual(activity.text, assert_activity.text) + self.assertEqual(activity.type, assert_activity.type) diff --git a/libraries/botframework-streaming/tests/test_responses.py b/libraries/botframework-streaming/tests/test_responses.py new file mode 100644 --- /dev/null +++ b/libraries/botframework-streaming/tests/test_responses.py @@ -0,0 +1,132 @@ +import json +from http import HTTPStatus + +import aiounittest + +from botbuilder.schema import Activity +from botframework.streaming import ReceiveResponse, StreamingResponse +from botframework.streaming.payloads import ResponseMessageStream + + +class TestResponses(aiounittest.AsyncTestCase): + async def test_receive_response_empty_streams(self): + sut = ReceiveResponse() + + self.assertIsNotNone(sut.streams) + self.assertEqual(0, len(sut.streams)) + + async def test_receive_response_none_properties(self): + sut = ReceiveResponse() + + self.assertEqual(0, sut.status_code) + + async def test_streaming_response_null_properties(self): + sut = StreamingResponse() + + self.assertEqual(0, sut.status_code) + self.assertIsNone(sut.streams) + + async def test_streaming_response_add_stream_none_throws(self): + sut = StreamingResponse() + + with self.assertRaises(TypeError): + sut.add_stream(None) + + async def test_streaming_response_add_stream_success(self): + sut = StreamingResponse() + content = "hi" + + sut.add_stream(content) + + self.assertIsNotNone(sut.streams) + self.assertEqual(1, len(sut.streams)) + self.assertEqual(content, sut.streams[0].content) + + async def test_streaming_response_add_stream_existing_list_success(self): + sut = StreamingResponse() + content = "hi" + content_2 = "hello" + + sut.streams = [ResponseMessageStream(content=content_2)] + + sut.add_stream(content) + + self.assertIsNotNone(sut.streams) + self.assertEqual(2, 
len(sut.streams)) + self.assertEqual(content_2, sut.streams[0].content) + self.assertEqual(content, sut.streams[1].content) + + async def test_streaming_response_not_found_success(self): + sut = StreamingResponse.not_found() + + self.assertEqual(HTTPStatus.NOT_FOUND, sut.status_code) + self.assertIsNone(sut.streams) + + async def test_streaming_response_forbidden_success(self): + sut = StreamingResponse.forbidden() + + self.assertEqual(HTTPStatus.FORBIDDEN, sut.status_code) + self.assertIsNone(sut.streams) + + async def test_streaming_response_ok_success(self): + sut = StreamingResponse.ok() + + self.assertEqual(HTTPStatus.OK, sut.status_code) + self.assertIsNone(sut.streams) + + async def test_streaming_response_internal_server_error_success(self): + sut = StreamingResponse.internal_server_error() + + self.assertEqual(HTTPStatus.INTERNAL_SERVER_ERROR, sut.status_code) + self.assertIsNone(sut.streams) + + async def test_streaming_response_create_with_body_success(self): + content = "hi" + sut = StreamingResponse.create_response(HTTPStatus.OK, content) + + self.assertEqual(HTTPStatus.OK, sut.status_code) + self.assertIsNotNone(sut.streams) + self.assertEqual(1, len(sut.streams)) + self.assertEqual(content, sut.streams[0].content) + + async def test_streaming_response_set_body_string_success(self): + sut = StreamingResponse() + + sut.set_body("123") + + self.assertIsNotNone(sut.streams) + self.assertEqual(1, len(sut.streams)) + self.assertIsInstance(sut.streams[0].content, list) + self.assertIsInstance(sut.streams[0].content[0], int) + self.assertEqual("123", bytes(sut.streams[0].content).decode("utf-8-sig")) + + async def test_streaming_response_set_body_none_does_not_throw(self): + sut = StreamingResponse() + + sut.set_body(None) + + async def test_streaming_response_set_body_success(self): + sut = StreamingResponse() + activity = Activity(text="hi", type="message") + + sut.set_body(activity) + + self.assertIsNotNone(sut.streams) + self.assertEqual(1, len(sut.streams)) + self.assertIsInstance(sut.streams[0].content, list) + self.assertIsInstance(sut.streams[0].content[0], int) + + assert_activity = Activity.deserialize( + json.loads(bytes(sut.streams[0].content).decode("utf-8-sig")) + ) + + self.assertEqual(activity.text, assert_activity.text) + self.assertEqual(activity.type, assert_activity.type) + + async def test_receive_base_read_body_as_string_no_content_empty_string(self): + sut = ReceiveResponse() + sut.streams = [] + + result = sut.read_body_as_str() + + self.assertEqual("", result) diff --git a/libraries/botframework-streaming/tests/test_send_operations.py b/libraries/botframework-streaming/tests/test_send_operations.py new file mode 100644 --- /dev/null +++ b/libraries/botframework-streaming/tests/test_send_operations.py @@ -0,0 +1,55 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +from typing import List +from uuid import uuid4 + +import aiounittest + +from botframework.streaming import PayloadStream, StreamingRequest +from botframework.streaming.payloads import SendOperations +from botframework.streaming.payloads.assemblers import PayloadStreamAssembler +from botframework.streaming.payload_transport import PayloadSender +from botframework.streaming.transport import TransportSenderBase + + +class MockTransportSender(TransportSenderBase): + def __init__(self): + super().__init__() + self.is_connected = True + self.buffers = [] + + async def send(self, buffer: List[int], offset: int, count: int) -> int: + self.buffers.append(buffer.copy()) + + return count + + +class TestSendOperations(aiounittest.AsyncTestCase): + async def test_request_dissasembler_with_variable_stream_send(self): + sender = PayloadSender() + transport = MockTransportSender() + sender.connect(transport) + + sut = SendOperations(sender) + + request = StreamingRequest.create_post("/a/b") + stream = PayloadStream(PayloadStreamAssembler(None, uuid4(), "blah", 100)) + stream.write([0] * 100, 0, 100) + request.add_stream(await stream.read_until_end()) + + await sut.send_request(uuid4(), request) + self.assertEqual(4, len(transport.buffers)) + + async def test_request_dissasembler_with_json_stream_send(self): + sender = PayloadSender() + transport = MockTransportSender() + sender.connect(transport) + + sut = SendOperations(sender) + + request = StreamingRequest.create_post("/a/b") + request.add_stream(bytes("abc", "ascii")) + + await sut.send_request(uuid4(), request) + self.assertEqual(4, len(transport.buffers))
Increase streaming unit tests to reach parity with C# unit tests
2021-05-26T03:40:37
microsoft/botbuilder-python
1,684
microsoft__botbuilder-python-1684
[ "1683" ]
9f7e6540eef7d0391624f1c91475bb4b2c63bba0
diff --git a/libraries/botframework-connector/botframework/connector/auth/skill_validation.py b/libraries/botframework-connector/botframework/connector/auth/skill_validation.py --- a/libraries/botframework-connector/botframework/connector/auth/skill_validation.py +++ b/libraries/botframework-connector/botframework/connector/auth/skill_validation.py @@ -65,15 +65,15 @@ def is_skill_claim(claims: Dict[str, object]) -> bool: :param claims: A dict of claims. :return bool: """ - if AuthenticationConstants.VERSION_CLAIM not in claims: - return False - if ( claims.get(AuthenticationConstants.APP_ID_CLAIM, None) == AuthenticationConstants.ANONYMOUS_SKILL_APP_ID ): return True + if AuthenticationConstants.VERSION_CLAIM not in claims: + return False + audience = claims.get(AuthenticationConstants.AUDIENCE_CLAIM) # The audience is https://api.botframework.com and not an appId.
diff --git a/libraries/botframework-connector/tests/test_auth.py b/libraries/botframework-connector/tests/test_auth.py --- a/libraries/botframework-connector/tests/test_auth.py +++ b/libraries/botframework-connector/tests/test_auth.py @@ -59,7 +59,7 @@ class TestAuth: @pytest.mark.asyncio async def test_claims_validation(self): - claims: List[Dict] = [] + claims: List[Dict] = {} default_auth_config = AuthenticationConfiguration() # No validator should pass.
is_skill_claim validation is not working for Anonymous authentication ## Version 4.13.0 ## Describe the bug When the skill bot doesn't have credentials set, the [is_skill_claim validation](https://github.com/microsoft/botbuilder-python/blob/main/libraries/botframework-connector/botframework/connector/auth/skill_validation.py#L62) returns false because it checks the `version_claim` before checking the `anonymous_skill_app_id`. In [.NET](https://github.com/microsoft/botbuilder-dotnet/blob/main/libraries/Microsoft.Bot.Connector/Authentication/SkillValidation.cs#L87) and [JS](https://github.com/microsoft/botbuilder-js/blob/main/libraries/botframework-connector/src/auth/skillValidation.ts#L89) SDKs, the order of the validation is the opposite. ![image](https://user-images.githubusercontent.com/44245136/119866220-f5322780-bef2-11eb-9975-effd68690433.png) This is causing that the EndOfConversation activities are not sent when returning `DialogTurnResult(DialogTurnStatus.Complete)` from a dialog. This issue affects local testing when no credentials are provided for the bot. ## To Reproduce Steps to reproduce the behavior: 1. Using the following bots: [WaterfallHostBotDotNet](https://github.com/microsoft/BotFramework-FunctionalTests/tree/main/Bots/DotNet/Consumers/CodeFirst/WaterfallHostBot) and [WaterfallSkillBotPython](https://github.com/microsoft/BotFramework-FunctionalTests/tree/main/Bots/Python/Skills/CodeFirst/WaterfallSkillBot): 2. Run the bots. 3. Open BotFramework Emulator and connect to your host bot. 4. Follow the dialog selecting: `normal` delivery mode, `Waterfall` skills, `3. WaterfallSkillBotPython`, `1. Cards` skill action and `end` option. 5. See how the bots stop responding. ![image](https://user-images.githubusercontent.com/44245136/119871635-0847f600-bef9-11eb-8c76-4ca348ce759e.png) ## Expected behavior When selecting `end` (return `DialogTurnResult(DialogTurnStatus.Complete))` the skill bot must end the dialog sending an EOC activity to the host bot so this one can continue the dialog flow.
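Illustrative note (not part of the original issue or patch): the ordering problem can be seen in a minimal, hedged sketch of the corrected check. The constant names (`APP_ID_CLAIM`, `ANONYMOUS_SKILL_APP_ID`, `VERSION_CLAIM`) are the ones used in the patch above; the function itself is a standalone illustration, not the library source, and the later audience checks are elided.

```python
from typing import Dict

from botframework.connector.auth import AuthenticationConstants


def is_skill_claim_fixed(claims: Dict[str, object]) -> bool:
    # 1. Check for the anonymous skill app id first (the ordering the fix restores).
    if (
        claims.get(AuthenticationConstants.APP_ID_CLAIM, None)
        == AuthenticationConstants.ANONYMOUS_SKILL_APP_ID
    ):
        return True

    # 2. Only then require the version claim, which anonymous tokens do not carry.
    if AuthenticationConstants.VERSION_CLAIM not in claims:
        return False

    # 3. ...remaining audience/app-id checks are unchanged and elided here.
    return False


# An anonymous skill presents no version claim, which is exactly the case the old
# ordering rejected, so EndOfConversation was never sent back to the host bot.
anonymous_claims = {
    AuthenticationConstants.APP_ID_CLAIM: AuthenticationConstants.ANONYMOUS_SKILL_APP_ID
}
assert is_skill_claim_fixed(anonymous_claims)
```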
2021-05-28T13:25:25
microsoft/botbuilder-python
1,724
microsoft__botbuilder-python-1724
[ "1690" ]
167da30f9ff61aef328bca9a0b047af57698729a
diff --git a/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py b/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py --- a/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py +++ b/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_handler.py @@ -14,6 +14,8 @@ TeamInfo, ChannelInfo, FileConsentCardResponse, + MeetingStartEventDetails, + MeetingEndEventDetails, TeamsChannelData, TeamsChannelAccount, MessagingExtensionAction, @@ -877,3 +879,64 @@ async def on_teams_channel_restored( # pylint: disable=unused-argument :returns: A task that represents the work queued to execute. """ return + + async def on_event_activity(self, turn_context: TurnContext): + """ + Invoked when an event activity is received from the connector when the base behavior of + :meth:`on_turn()` is used. + + :param turn_context: The context object for this turn + :type turn_context: :class:`botbuilder.core.TurnContext` + + :returns: A task that represents the work queued to execute + + .. remarks:: + When the :meth:`on_turn()` method receives an event activity, it calls this method. + If the activity name is `tokens/response`, it calls :meth:`on_token_response_event()`; + otherwise, it calls :meth:`on_event()`. + + In a derived class, override this method to add logic that applies to all event activities. + Add logic to apply before the specific event-handling logic before the call to this base class method. + Add logic to apply after the specific event-handling logic after the call to this base class method. + + Event activities communicate programmatic information from a client or channel to a bot. + The meaning of an event activity is defined by the event activity name property, which is meaningful within + the scope of a channel. + """ + if turn_context.activity.channel_id == Channels.ms_teams: + if turn_context.activity.name == "application/vnd.microsoft.meetingStart": + return await self.on_teams_meeting_start_event( + turn_context.activity.value, turn_context + ) + if turn_context.activity.name == "application/vnd.microsoft.meetingEnd": + return await self.on_teams_meeting_end_event( + turn_context.activity.value, turn_context + ) + + return await super().on_event_activity(turn_context) + + async def on_teams_meeting_start_event( + self, meeting: MeetingStartEventDetails, turn_context: TurnContext + ): # pylint: disable=unused-argument + """ + Override this in a derived class to provide logic for when a Teams meeting start event is received. + + :param meeting: The details of the meeting. + :param turn_context: A context object for this turn. + + :returns: A task that represents the work queued to execute. + """ + return + + async def on_teams_meeting_end_event( + self, meeting: MeetingEndEventDetails, turn_context: TurnContext + ): # pylint: disable=unused-argument + """ + Override this in a derived class to provide logic for when a Teams meeting end event is received. + + :param meeting: The details of the meeting. + :param turn_context: A context object for this turn. + + :returns: A task that represents the work queued to execute. 
+ """ + return diff --git a/libraries/botbuilder-schema/botbuilder/schema/teams/__init__.py b/libraries/botbuilder-schema/botbuilder/schema/teams/__init__.py --- a/libraries/botbuilder-schema/botbuilder/schema/teams/__init__.py +++ b/libraries/botbuilder-schema/botbuilder/schema/teams/__init__.py @@ -11,6 +11,8 @@ from ._models_py3 import FileUploadInfo from ._models_py3 import MeetingDetails from ._models_py3 import MeetingInfo +from ._models_py3 import MeetingStartEventDetails +from ._models_py3 import MeetingEndEventDetails from ._models_py3 import MessageActionsPayload from ._models_py3 import MessageActionsPayloadApp from ._models_py3 import MessageActionsPayloadAttachment @@ -87,6 +89,8 @@ "FileUploadInfo", "MeetingDetails", "MeetingInfo", + "MeetingStartEventDetails", + "MeetingEndEventDetails", "MessageActionsPayload", "MessageActionsPayloadApp", "MessageActionsPayloadAttachment", diff --git a/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py b/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py --- a/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py +++ b/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py @@ -2372,54 +2372,65 @@ def _custom_init(self): return -class MeetingDetails(Model): +class MeetingDetailsBase(Model): """Specific details of a Teams meeting. :param id: The meeting's Id, encoded as a BASE64 string. :type id: str + :param join_url: The URL used to join the meeting. + :type join_url: str + :param title: The title of the meeting. + :type title: str + """ + + _attribute_map = { + "id": {"key": "uniqueId", "type": "str"}, + "join_url": {"key": "joinUrl", "type": "str"}, + "title": {"key": "title", "type": "str"}, + } + + def __init__( + self, *, id: str = None, join_url: str = None, title: str = None, **kwargs + ) -> None: + super(MeetingDetailsBase, self).__init__(**kwargs) + self.id = id + self.join_url = join_url + self.title = title + + +class MeetingDetails(MeetingDetailsBase): + """Specific details of a Teams meeting. + :param ms_graph_resource_id: The MsGraphResourceId, used specifically for MS Graph API calls. :type ms_graph_resource_id: str :param scheduled_start_time: The meeting's scheduled start time, in UTC. :type scheduled_start_time: str :param scheduled_end_time: The meeting's scheduled end time, in UTC. :type scheduled_end_time: str - :param join_url: The URL used to join the meeting. - :type join_url: str - :param title: The title of the meeting. - :type title: str :param type: The meeting's type. 
:type type: str """ _attribute_map = { - "id": {"key": "uniqueId", "type": "str"}, "ms_graph_resource_id": {"key": "msGraphResourceId", "type": "str"}, "scheduled_start_time": {"key": "scheduledStartTime", "type": "str"}, "scheduled_end_time": {"key": "scheduledEndTime", "type": "str"}, - "join_url": {"key": "joinUrl", "type": "str"}, - "title": {"key": "title", "type": "str"}, "type": {"key": "type", "type": "str"}, } def __init__( self, *, - id: str = None, ms_graph_resource_id: str = None, scheduled_start_time: str = None, scheduled_end_time: str = None, - join_url: str = None, - title: str = None, type: str = None, **kwargs ) -> None: super(MeetingDetails, self).__init__(**kwargs) - self.id = id self.ms_graph_resource_id = ms_graph_resource_id self.scheduled_start_time = scheduled_start_time self.scheduled_end_time = scheduled_end_time - self.join_url = join_url - self.title = title self.type = type @@ -2452,3 +2463,45 @@ def __init__( self.details = details self.conversation = conversation self.organizer = organizer + + +class MeetingEventDetails(MeetingDetailsBase): + """Base class for Teams meting start and end events. + + :param meeting_type: The meeting's type. + :type meeting_type: str + """ + + _attribute_map = {"meeting_type": {"key": "MeetingType", "type": "str"}} + + def __init__(self, *, meeting_type: str = None, **kwargs): + super(MeetingEventDetails, self).__init__(**kwargs) + self.meeting_type = meeting_type + + +class MeetingStartEventDetails(MeetingDetailsBase): + """Specific details of a Teams meeting start event. + + :param start_time: Timestamp for meeting start, in UTC. + :type start_time: str + """ + + _attribute_map = {"start_time": {"key": "StartTime", "type": "str"}} + + def __init__(self, *, start_time: str = None, **kwargs): + super(MeetingStartEventDetails, self).__init__(**kwargs) + self.start_time = start_time + + +class MeetingEndEventDetails(MeetingDetailsBase): + """Specific details of a Teams meeting end event. + + :param end_time: Timestamp for meeting end, in UTC. + :type end_time: str + """ + + _attribute_map = {"end_time": {"key": "EndTime", "type": "str"}} + + def __init__(self, *, end_time: str = None, **kwargs): + super(MeetingEndEventDetails, self).__init__(**kwargs) + self.end_time = end_time
diff --git a/libraries/botbuilder-core/tests/teams/test_teams_activity_handler.py b/libraries/botbuilder-core/tests/teams/test_teams_activity_handler.py --- a/libraries/botbuilder-core/tests/teams/test_teams_activity_handler.py +++ b/libraries/botbuilder-core/tests/teams/test_teams_activity_handler.py @@ -19,6 +19,8 @@ AppBasedLinkQuery, ChannelInfo, FileConsentCardResponse, + MeetingStartEventDetails, + MeetingEndEventDetails, MessageActionsPayload, MessagingExtensionAction, MessagingExtensionQuery, @@ -309,6 +311,26 @@ async def on_teams_tab_submit( self.record.append("on_teams_tab_submit") return await super().on_teams_tab_submit(turn_context, tab_submit) + async def on_event_activity(self, turn_context: TurnContext): + self.record.append("on_event_activity") + return await super().on_event_activity(turn_context) + + async def on_teams_meeting_start_event( + self, meeting: MeetingStartEventDetails, turn_context: TurnContext + ): + self.record.append("on_teams_meeting_start_event") + return await super().on_teams_meeting_start_event( + turn_context.activity.value, turn_context + ) + + async def on_teams_meeting_end_event( + self, meeting: MeetingEndEventDetails, turn_context: TurnContext + ): + self.record.append("on_teams_meeting_end_event") + return await super().on_teams_meeting_end_event( + turn_context.activity.value, turn_context + ) + class NotImplementedAdapter(BotAdapter): async def delete_activity( @@ -1064,3 +1086,37 @@ async def test_typing_activity(self): assert len(bot.record) == 1 assert bot.record[0] == "on_typing_activity" + + async def test_on_teams_meeting_start_event(self): + activity = Activity( + type=ActivityTypes.event, + name="application/vnd.microsoft.meetingStart", + channel_id=Channels.ms_teams, + ) + + turn_context = TurnContext(SimpleAdapter(), activity) + + # Act + bot = TestingTeamsActivityHandler() + await bot.on_turn(turn_context) + + assert len(bot.record) == 2 + assert bot.record[0] == "on_event_activity" + assert bot.record[1] == "on_teams_meeting_start_event" + + async def test_on_teams_meeting_end_event(self): + activity = Activity( + type=ActivityTypes.event, + name="application/vnd.microsoft.meetingEnd", + channel_id=Channels.ms_teams, + ) + + turn_context = TurnContext(SimpleAdapter(), activity) + + # Act + bot = TestingTeamsActivityHandler() + await bot.on_turn(turn_context) + + assert len(bot.record) == 2 + assert bot.record[0] == "on_event_activity" + assert bot.record[1] == "on_teams_meeting_end_event"
Teams - meeting start/end events (Python) See [parent](https://github.com/microsoft/botframework-sdk/issues/6305)
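Illustrative sketch (not part of the port request): a bot can consume the new handlers added by the patch above by overriding them on `TeamsActivityHandler`. The bot class name and reply text are assumptions; the event detail fields (`start_time`, `end_time`) come from the schema additions in the patch.

```python
from botbuilder.core import MessageFactory, TurnContext
from botbuilder.core.teams import TeamsActivityHandler
from botbuilder.schema.teams import MeetingEndEventDetails, MeetingStartEventDetails


class MeetingEventsBot(TeamsActivityHandler):
    async def on_teams_meeting_start_event(
        self, meeting: MeetingStartEventDetails, turn_context: TurnContext
    ):
        # Invoked for "application/vnd.microsoft.meetingStart" event activities.
        await turn_context.send_activity(
            MessageFactory.text(f"Meeting started at {meeting.start_time}")
        )

    async def on_teams_meeting_end_event(
        self, meeting: MeetingEndEventDetails, turn_context: TurnContext
    ):
        # Invoked for "application/vnd.microsoft.meetingEnd" event activities.
        await turn_context.send_activity(
            MessageFactory.text(f"Meeting ended at {meeting.end_time}")
        )
```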
2021-06-18T20:20:52
microsoft/botbuilder-python
1,726
microsoft__botbuilder-python-1726
[ "1672" ]
80f25c922dfae8664d1c0d64f55f189ff8b4458a
diff --git a/libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py b/libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py --- a/libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py +++ b/libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py @@ -511,10 +511,7 @@ async def process_activity_with_identity( if invoke_response is None: return InvokeResponse(status=int(HTTPStatus.NOT_IMPLEMENTED)) return InvokeResponse( - status=invoke_response.value.status, - body=invoke_response.value.body.serialize() - if invoke_response.value.body - else None, + status=invoke_response.value.status, body=invoke_response.value.body, ) return None diff --git a/libraries/botbuilder-integration-aiohttp/setup.py b/libraries/botbuilder-integration-aiohttp/setup.py --- a/libraries/botbuilder-integration-aiohttp/setup.py +++ b/libraries/botbuilder-integration-aiohttp/setup.py @@ -10,7 +10,7 @@ "botframework-connector==4.14.0", "botbuilder-core==4.14.0", "yarl<=1.4.2", - "aiohttp~=3.6.2", + "aiohttp>=3.6.2,<3.8.0", ] root = os.path.abspath(os.path.dirname(__file__))
diff --git a/libraries/botbuilder-ai/tests/requirements.txt b/libraries/botbuilder-ai/tests/requirements.txt --- a/libraries/botbuilder-ai/tests/requirements.txt +++ b/libraries/botbuilder-ai/tests/requirements.txt @@ -1 +1 @@ -aioresponses~=0.6.3 \ No newline at end of file +aioresponses~=0.7.2 \ No newline at end of file diff --git a/libraries/botbuilder-core/tests/test_bot_framework_adapter.py b/libraries/botbuilder-core/tests/test_bot_framework_adapter.py --- a/libraries/botbuilder-core/tests/test_bot_framework_adapter.py +++ b/libraries/botbuilder-core/tests/test_bot_framework_adapter.py @@ -815,9 +815,9 @@ async def callback(context: TurnContext): assert invoke_response assert invoke_response.status == 200 - assert invoke_response.body["id"] == inbound_activity.value.id + assert invoke_response.body.id == inbound_activity.value.id assert ( - invoke_response.body["connectionName"] + invoke_response.body.connection_name == inbound_activity.value.connection_name ) @@ -880,8 +880,8 @@ async def callback(context): assert invoke_response assert invoke_response.status == 200 - assert invoke_response.body["id"] == inbound_activity.value.id + assert invoke_response.body.id == inbound_activity.value.id assert ( - invoke_response.body["connectionName"] + invoke_response.body.connection_name == inbound_activity.value.connection_name ) diff --git a/libraries/botframework-connector/tests/requirements.txt b/libraries/botframework-connector/tests/requirements.txt --- a/libraries/botframework-connector/tests/requirements.txt +++ b/libraries/botframework-connector/tests/requirements.txt @@ -1,5 +1,5 @@ pytest-cov>=2.6.0 -pytest==5.2.2 +pytest~=6.2.3 azure-devtools>=0.4.1 -pytest-asyncio==0.10.0 +pytest-asyncio==0.15.1 ddt==1.2.1 \ No newline at end of file
BotFrameworkAdapter.process_activity_with_identity calls serialize() on dict ## Version 4.13.0 ## Describe the bug When running, e.g., the [fetch task](https://github.com/microsoft/botbuilder-python/tree/main/tests/teams/scenarios/action-based-messaging-extension-fetch-task) example, an exception is raised when `on_teams_messaging_extension_fetch_task` is called. The problem is that in `bot_framework_adapter.py` on line 515, `serialize()` is called on `invoke_response.value.body`, which is a python dict. ## To Reproduce Steps to reproduce the behavior: 1. run the above-mentioned example and add to teams 2. Trigger bot's 'create card' message extension 3. A dialog is shown with an error, and an exception is raised in the bot code. ## Expected behavior Expected behavior is that the 'Create Card' example dialog is shown instead of an error. A fix to this error could for instance be to change line 515 in `bot_framework_adapter.py` to: ```python return InvokeResponse( status=invoke_response.value.status, body=invoke_response.value.body # line 515 ```
@axelsrz Can you take a look at this? @jooste thanks for submitting this, we will investigate and provide a fix in our next release if found necessary. @axelsrz any updates on this issue? @jooste thanks for submitting this issue. We'll prioritize fixing this in the next milestone.
2021-06-21T19:50:31
microsoft/botbuilder-python
1,727
microsoft__botbuilder-python-1727
[ "1685" ]
d05e62e9c5c1fbaa47b68381e323a15df6480928
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_context.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_context.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_context.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_context.py @@ -219,50 +219,38 @@ async def cancel_all_dialogs( :param event_value: :return: """ - # pylint: disable=too-many-nested-blocks try: - if cancel_parents is None: - event_name = event_name or DialogEvents.cancel_dialog - - if self.stack or self.parent: - # Cancel all local and parent dialogs while checking for interception - notify = False - dialog_context = self - - while dialog_context: - if dialog_context.stack: - # Check to see if the dialog wants to handle the event - if notify: - event_handled = await dialog_context.emit_event( - event_name, - event_value, - bubble=False, - from_leaf=False, - ) - - if event_handled: - break - - # End the active dialog - await dialog_context.end_active_dialog( - DialogReason.CancelCalled - ) - else: - dialog_context = ( - dialog_context.parent if cancel_parents else None + event_name = event_name or DialogEvents.cancel_dialog + if self.stack or self.parent: + # Cancel all local and parent dialogs while checking for interception + notify = False + dialog_context = self + + while dialog_context: + if dialog_context.stack: + # Check to see if the dialog wants to handle the event + if notify: + event_handled = await dialog_context.emit_event( + event_name, event_value, bubble=False, from_leaf=False, ) - notify = True + if event_handled: + break + + # End the active dialog + await dialog_context.end_active_dialog( + DialogReason.CancelCalled + ) + else: + dialog_context = ( + dialog_context.parent if cancel_parents else None + ) - return DialogTurnResult(DialogTurnStatus.Cancelled) - # Stack was empty and no parent - return DialogTurnResult(DialogTurnStatus.Empty) + notify = True - if self.stack: - while self.stack: - await self.end_active_dialog(DialogReason.CancelCalled) return DialogTurnResult(DialogTurnStatus.Cancelled) + # Stack was empty and no parent return DialogTurnResult(DialogTurnStatus.Empty) except Exception as err: self.__set_exception_context_data(err)
DialogContext.cancel_all_dialogs(True) does not propagate cancellation through parent dialogs ## Version 4.13.0 ## Describe the bug Within a child dialog we should be able to `cancel_all_dialogs` by setting the first parameter (`cancel_parents`) to `True` and that propagates up through the parents thus cancelling the conversation. However due to the following statement in [dialog_context.py#L224]( https://github.com/microsoft/botbuilder-python/blob/885d3d11304aa3fedefee400378721c2c3e423bd/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_context.py#L224) this does not happen and instead the parent dialog's next step is run. ``` if cancel_parents is None: ``` ## To Reproduce Steps to reproduce the behavior: 1. In parent dialog MainDialog call `begin_dialog('ChildDialog')` 2. In ChildDialog waterfall step call `step.cancel_all_dialogs(True)` ## Expected behavior All dialogs/conversation are ended ## Additional Notes [botbuilder-js dialogContext.cancelAllDialogs](https://github.com/microsoft/botbuilder-js/blob/d57152ab47e0b02cef23f629775ba9f77d930b0f/libraries/botbuilder-dialogs/src/dialogContext.ts#L285)
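Illustrative repro sketch (hedged; the dialog ids, class names, and waterfall layout are assumptions): the key call is `cancel_all_dialogs(True)` from inside the child dialog, which before the fix fell into the `if cancel_parents is None:` branch and let the parent's next step run.

```python
from botbuilder.dialogs import (
    ComponentDialog,
    DialogTurnResult,
    WaterfallDialog,
    WaterfallStepContext,
)


class ChildDialog(ComponentDialog):
    def __init__(self):
        super().__init__(ChildDialog.__name__)
        self.add_dialog(WaterfallDialog("childWaterfall", [self.cancel_step]))
        self.initial_dialog_id = "childWaterfall"

    async def cancel_step(self, step: WaterfallStepContext) -> DialogTurnResult:
        # cancel_parents=True is expected to cancel MainDialog as well, ending the
        # whole conversation rather than resuming the parent's next waterfall step.
        return await step.cancel_all_dialogs(True)
```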
@axelsrz could you take a look? Hello @cas--, thanks for reporting this issue, we will schedule a fix for our next release.
2021-06-21T20:02:49
microsoft/botbuilder-python
1,747
microsoft__botbuilder-python-1747
[ "1626" ]
557462f80e5789dbcf0757ca1e9db037c156b68d
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/turn_memory_scope.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/turn_memory_scope.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/turn_memory_scope.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/turn_memory_scope.py @@ -58,7 +58,7 @@ def _convert_keys(self): class TurnMemoryScope(MemoryScope): def __init__(self): - super().__init__(scope_path.TURN) + super().__init__(scope_path.TURN, False) def get_memory(self, dialog_context: "DialogContext") -> object: if not dialog_context:
port: turn memory scope includesnapshot to false (#5441) The changes in [turn memory scope includesnapshot to false (#5441)](https://github.com/microsoft/botbuilder-dotnet/pull/5441) may need to be ported to maintain parity with `microsoft/botbuilder-dotnet`. <blockquote> Fixes #5432 </blockquote> Please review and, if necessary, port the changes.
2021-06-24T20:55:13
microsoft/botbuilder-python
1,748
microsoft__botbuilder-python-1748
[ "1736" ]
1bc19a74e344bb889735ac205c25e7050c785d67
diff --git a/libraries/botbuilder-core/setup.py b/libraries/botbuilder-core/setup.py --- a/libraries/botbuilder-core/setup.py +++ b/libraries/botbuilder-core/setup.py @@ -8,6 +8,7 @@ REQUIRES = [ "botbuilder-schema==4.14.0", "botframework-connector==4.14.0", + "botframework-streaming==4.14.0", "jsonpickle>=1.2,<1.5", ] diff --git a/libraries/botframework-streaming/setup.py b/libraries/botframework-streaming/setup.py --- a/libraries/botframework-streaming/setup.py +++ b/libraries/botframework-streaming/setup.py @@ -4,11 +4,10 @@ import os from setuptools import setup -VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.12.0" +VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.14.0" REQUIRES = [ "botbuilder-schema>=4.12.0", "botframework-connector>=4.12.0", - "botbuilder-core>=4.12.0", ] root = os.path.abspath(os.path.dirname(__file__))
botbuilder-core library is missing the botframework-streaming dependency ## Version 4.14.0.20210616.dev252366 ## Describe the bug The botbuilder-core library is missing the botframework-streaming dependency. When running a python bot with the botbuilder-core library installed, it won't run because it is missing the botframework-streaming dependency. The dependency reference is missing from the requirements.txt file, and this new library is not published in any of the regular packages indexes ([test.pypi](https://test.pypi.org/), [pypi](https://pypi.org/) and [azure artifacts](https://dev.azure.com/ConversationalAI/BotFramework/_packaging?_a=feed&feed=SDK%40Local)), so it can't be installed manually. When running the bots locally it is possible to install the dependency from a local folder with the code cloned from the repo. ## To Reproduce 1. Open a bot that uses the botbuilder-core library. 2. Install a preview version (4.14.x). 3. Run the bot. ## Expected behavior The dependencies being installed should install all the required sub-dependencies or have them available for manual installation. ## Screenshots ![image](https://user-images.githubusercontent.com/38112957/122459875-d79f2d80-cf87-11eb-93dd-d6aaf5128fbd.png) ## Additional context This issue is blocking the pipelines from the [BotFramework-FunctionalTests](https://github.com/microsoft/BotFramework-FunctionalTests/) repository from testing preview versions of the BotBuilder Python libraries.
2021-06-24T21:03:43
microsoft/botbuilder-python
1,749
microsoft__botbuilder-python-1749
[ "1665" ]
3a91f0fc6221c5cf146836b7b881120489c88e37
diff --git a/libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py b/libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py --- a/libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py +++ b/libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py @@ -340,6 +340,7 @@ async def create_conversation( If the conversation is established with the specified users, the ID of the activity will contain the ID of the new conversation. """ + try: if not service_url: service_url = reference.service_url @@ -366,8 +367,10 @@ async def create_conversation( # Mix in the tenant ID if specified. This is required for MS Teams. if reference.conversation and reference.conversation.tenant_id: # Putting tenant_id in channel_data is a temporary while we wait for the Teams API to be updated - parameters.channel_data = { - "tenant": {"tenantId": reference.conversation.tenant_id} + if parameters.channel_data is None: + parameters.channel_data = {} + parameters.channel_data["tenant"] = { + "tenantId": reference.conversation.tenant_id } # Permanent solution is to put tenant_id in parameters.tenant_id
BotFrameworkAdapter.create_conversation overrides parameters.channel_data ## Version 4.13 ## Describe the bug A Teams bot using BotFrameworkAdapter can't create conversation in a team channel. turn_context.adapter.create_conversation() raises a parameter error of HTTP request. ## To Reproduce Steps to reproduce the behavior: 1. Deploy a bot using BotFrameworkAdapter and turn_context.adapter.create_conversation() method to create a conversation in a team channel. To do it, conversation_parameters argument of create_conversation() must have channel_data attribute that has information of the channel to use. 2. Show the logs of the bot. If you use Azure, Log-in to the Azure portal with a web browser, click "app service", click the bot app and click Log Stream. 3. Log-in Teams. 4. Make the bot creating a conversation in a team channel. 5. See an error. 6. Confirm the error logs in the log window you opened at step 2. ## Expected behavior A conversation will be created without any error. ## Screenshots No. ## Additional context No. ## Additional Information parameters.channel_data looks overridden in create_conversation() below: https://github.com/microsoft/botbuilder-python/blob/main/libraries/botbuilder-core/botbuilder/core/bot_framework_adapter.py#L369 IMO, it should be: ``` if parameters.channel_data is None: parameters.channel_data = {} parameters.channel_data["tenant"] = {"tenantId": reference.conversation.tenant_id} ```
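Illustrative sketch of how the overwritten field is used in practice (hedged: the helper name, channel id variable, and message text are assumptions; `ConversationParameters.channel_data` and the adapter call follow the issue description and the Teams start-thread sample):

```python
from botbuilder.core import MessageFactory, TurnContext
from botbuilder.schema import ConversationParameters


async def start_channel_thread(turn_context: TurnContext, teams_channel_id: str):
    # channel_data tells Teams which channel to create the thread in; the buggy code
    # replaced this whole dict with {"tenant": {...}}, silently dropping the channel.
    params = ConversationParameters(
        is_group=True,
        channel_data={"channel": {"id": teams_channel_id}},
        activity=MessageFactory.text("Starting a new thread in this channel"),
    )

    async def on_created(new_turn_context: TurnContext):
        # Runs in the newly created conversation.
        await new_turn_context.send_activity("Thread created.")

    await turn_context.adapter.create_conversation(
        TurnContext.get_conversation_reference(turn_context.activity),
        on_created,
        conversation_parameters=params,
    )
```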
Thank you for the report, @yosshy Can you provide some code to help us reproduce the issue? Are you following a sample, perhaps? Also, are you able to reproduce this problem with a local bot or only with a deployed bot? @v-kydela thank you for your reply. I'm not following a sample but creating a new bot for our purpose. Currently, I can't provide our code soon. If "a local bot" means one running with Bot Framework Emulator, we can't reproduce it because it is not for a team channel but for 1:1 chat, IMO. So, we can do it only with a deployed one. Hi @yosshy While Emulator is useful for testing local bots, Emulator is just another client and does not define where the bot is running. You can test local bots on Emulator or Teams, and you can test deployed bots on Emulator or Teams. If you're unfamiliar with tunneling, please refer to this documentation: https://docs.microsoft.com/azure/bot-service/bot-service-debug-channel-ngrok Hi @v-kydela , Thank you for your description about local bots. I have no local environment for a local bot but I'll prepare it soon. @yosshy - Have you prepared a local environment yet? @cleemullins - I think this is a legitimate bug rather than a support issue. A suggested fix was provided in the original post, but I think it should be applied to the other SDK's as well. Can this be escalated? @yosshy - As a workaround, you can bypass the adapter by calling `connector_client.conversations.create_conversation` directly as seen in [this sample](https://github.com/microsoft/BotBuilder-Samples/blob/main/samples/python/58.teams-start-thread-in-channel/bots/teams_start_thread_in_channel.py#L35) @EricDahlvang - To reproduce this bug, you need to call `adapter.create_conversation` as described in the original repro steps. The sample can be modified to do so. I'll go ahead and unassign myself so it can be triaged @axelsrz can you take a look and confirm this is a bug in the product. We will add this bug to our current iteration to provide a fix, as well as track this fix on our other SDKs. To those that have reproduced this error, what data is being put into ConversationParameters.channel_data? Specifically, how does that differ from sample 57.teams-conversation-bot, which doesn't populate channel_data at all? What I'm trying to chase down is whether we still have to add tenant id to channel_data.
2021-06-24T21:31:40
microsoft/botbuilder-python
1,753
microsoft__botbuilder-python-1753
[ "1573" ]
27178840ac658a982ffc66ecf4aaa39161a9f3ba
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_extensions.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_extensions.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_extensions.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_extensions.py @@ -9,8 +9,11 @@ ) from botbuilder.core import BotAdapter, StatePropertyAccessor, TurnContext from botbuilder.core.skills import SkillHandler, SkillConversationReference +import botbuilder.dialogs as dialogs # pylint: disable=unused-import +from botbuilder.dialogs.memory import DialogStateManager +from botbuilder.dialogs.dialog_context import DialogContext +from botbuilder.dialogs.dialog_turn_result import DialogTurnResult from botbuilder.dialogs import ( - Dialog, DialogEvents, DialogSet, DialogTurnStatus, @@ -21,7 +24,9 @@ class DialogExtensions: @staticmethod async def run_dialog( - dialog: Dialog, turn_context: TurnContext, accessor: StatePropertyAccessor + dialog: "dialogs.Dialog", + turn_context: TurnContext, + accessor: StatePropertyAccessor, ): """ Creates a dialog stack and starts a dialog, pushing it onto the stack. @@ -30,20 +35,71 @@ async def run_dialog( dialog_set = DialogSet(accessor) dialog_set.add(dialog) - dialog_context = await dialog_set.create_context(turn_context) + dialog_context: DialogContext = await dialog_set.create_context(turn_context) + await DialogExtensions._internal_run(turn_context, dialog.id, dialog_context) + + @staticmethod + async def _internal_run( + context: TurnContext, dialog_id: str, dialog_context: DialogContext + ) -> DialogTurnResult: + # map TurnState into root dialog context.services + for key, service in context.turn_state.items(): + dialog_context.services[key] = service + + # get the DialogStateManager configuration + dialog_state_manager = DialogStateManager(dialog_context) + await dialog_state_manager.load_all_scopes() + dialog_context.context.turn_state[ + dialog_state_manager.__class__.__name__ + ] = dialog_state_manager + + # Loop as long as we are getting valid OnError handled we should continue executing the actions for the turn. + + # NOTE: We loop around this block because each pass through we either complete the turn and break out of the + # loop or we have had an exception AND there was an OnError action which captured the error. We need to + # continue the turn based on the actions the OnError handler introduced. + end_of_turn = False + while not end_of_turn: + try: + dialog_turn_result = await DialogExtensions.__inner_run( + context, dialog_id, dialog_context + ) + + # turn successfully completed, break the loop + end_of_turn = True + except Exception as err: + # fire error event, bubbling from the leaf. + handled = await dialog_context.emit_event( + DialogEvents.error, err, bubble=True, from_leaf=True + ) + + if not handled: + # error was NOT handled, throw the exception and end the turn. (This will trigger the + # Adapter.OnError handler and end the entire dialog stack) + raise + + # save all state scopes to their respective botState locations. 
+ await dialog_state_manager.save_all_changes() + + # return the redundant result because the DialogManager contract expects it + return dialog_turn_result + + @staticmethod + async def __inner_run( + turn_context: TurnContext, dialog_id: str, dialog_context: DialogContext + ) -> DialogTurnResult: # Handle EoC and Reprompt event from a parent bot (can be root bot to skill or skill to skill) if DialogExtensions.__is_from_parent_to_skill(turn_context): # Handle remote cancellation request from parent. if turn_context.activity.type == ActivityTypes.end_of_conversation: if not dialog_context.stack: # No dialogs to cancel, just return. - return + return DialogTurnResult(DialogTurnStatus.Empty) # Send cancellation message to the dialog to ensure all the parents are canceled # in the right order. - await dialog_context.cancel_all_dialogs() - return + return await dialog_context.cancel_all_dialogs(True) # Handle a reprompt event sent from the parent. if ( @@ -52,15 +108,17 @@ async def run_dialog( ): if not dialog_context.stack: # No dialogs to reprompt, just return. - return + return DialogTurnResult(DialogTurnStatus.Empty) await dialog_context.reprompt_dialog() - return + return DialogTurnResult(DialogTurnStatus.Waiting) # Continue or start the dialog. result = await dialog_context.continue_dialog() if result.status == DialogTurnStatus.Empty: - result = await dialog_context.begin_dialog(dialog.id) + result = await dialog_context.begin_dialog(dialog_id) + + await DialogExtensions._send_state_snapshot_trace(dialog_context) # Skills should send EoC when the dialog completes. if ( @@ -78,6 +136,8 @@ async def run_dialog( ) await turn_context.send_activity(activity) + return result + @staticmethod def __is_from_parent_to_skill(turn_context: TurnContext) -> bool: if turn_context.turn_state.get(SkillHandler.SKILL_CONVERSATION_REFERENCE_KEY): @@ -88,6 +148,34 @@ def __is_from_parent_to_skill(turn_context: TurnContext) -> bool: claims_identity, ClaimsIdentity ) and SkillValidation.is_skill_claim(claims_identity.claims) + @staticmethod + async def _send_state_snapshot_trace(dialog_context: DialogContext): + """ + Helper to send a trace activity with a memory snapshot of the active dialog DC. + :param dialog_context: + :return: + """ + claims_identity = dialog_context.context.turn_state.get( + BotAdapter.BOT_IDENTITY_KEY, None + ) + trace_label = ( + "Skill State" + if isinstance(claims_identity, ClaimsIdentity) + and SkillValidation.is_skill_claim(claims_identity.claims) + else "Bot State" + ) + # send trace of memory + snapshot = DialogExtensions._get_active_dialog_context( + dialog_context + ).state.get_memory_snapshot() + trace_activity = Activity.create_trace_activity( + "BotState", + "https://www.botframework.com/schemas/botState", + snapshot, + trace_label, + ) + await dialog_context.context.send_activity(trace_activity) + @staticmethod def __send_eoc_to_parent(turn_context: TurnContext) -> bool: claims_identity = turn_context.turn_state.get(BotAdapter.BOT_IDENTITY_KEY) @@ -111,3 +199,16 @@ def __send_eoc_to_parent(turn_context: TurnContext) -> bool: return True return False + + @staticmethod + def _get_active_dialog_context(dialog_context: DialogContext) -> DialogContext: + """ + Recursively walk up the DC stack to find the active DC. 
+ :param dialog_context: + :return: + """ + child = dialog_context.child + if not child: + return dialog_context + + return DialogExtensions._get_active_dialog_context(child) diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_manager.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_manager.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_manager.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_manager.py @@ -3,6 +3,7 @@ from datetime import datetime, timedelta from threading import Lock +from warnings import warn from botbuilder.core import ( BotAdapter, @@ -12,10 +13,7 @@ TurnContext, ) from botbuilder.core.skills import SkillConversationReference, SkillHandler -from botbuilder.dialogs.memory import ( - DialogStateManager, - DialogStateManagerConfiguration, -) +from botbuilder.dialogs.memory import DialogStateManagerConfiguration from botbuilder.schema import Activity, ActivityTypes, EndOfConversationCodes from botframework.connector.auth import ( AuthenticationConstants, @@ -27,6 +25,7 @@ from .dialog import Dialog from .dialog_context import DialogContext from .dialog_events import DialogEvents +from .dialog_extensions import DialogExtensions from .dialog_set import DialogSet from .dialog_state import DialogState from .dialog_manager_result import DialogManagerResult @@ -142,60 +141,10 @@ async def on_turn(self, context: TurnContext) -> DialogManagerResult: # Create DialogContext dialog_context = DialogContext(self.dialogs, context, dialog_state) - # promote initial TurnState into dialog_context.services for contextual services - for key, service in dialog_context.services.items(): - dialog_context.services[key] = service - - # map TurnState into root dialog context.services - for key, service in context.turn_state.items(): - dialog_context.services[key] = service - - # get the DialogStateManager configuration - dialog_state_manager = DialogStateManager( - dialog_context, self.state_configuration + # Call the common dialog "continue/begin" execution pattern shared with the classic RunAsync extension method + turn_result = await DialogExtensions._internal_run( # pylint: disable=protected-access + context, self._root_dialog_id, dialog_context ) - await dialog_state_manager.load_all_scopes() - dialog_context.context.turn_state[ - dialog_state_manager.__class__.__name__ - ] = dialog_state_manager - - turn_result: DialogTurnResult = None - - # Loop as long as we are getting valid OnError handled we should continue executing the actions for the turn. - - # NOTE: We loop around this block because each pass through we either complete the turn and break out of the - # loop or we have had an exception AND there was an OnError action which captured the error. We need to - # continue the turn based on the actions the OnError handler introduced. - end_of_turn = False - while not end_of_turn: - try: - claims_identity: ClaimsIdentity = context.turn_state.get( - BotAdapter.BOT_IDENTITY_KEY, None - ) - if isinstance( - claims_identity, ClaimsIdentity - ) and SkillValidation.is_skill_claim(claims_identity.claims): - # The bot is running as a skill. - turn_result = await self.handle_skill_on_turn(dialog_context) - else: - # The bot is running as root bot. - turn_result = await self.handle_bot_on_turn(dialog_context) - - # turn successfully completed, break the loop - end_of_turn = True - except Exception as err: - # fire error event, bubbling from the leaf. 
- handled = await dialog_context.emit_event( - DialogEvents.error, err, bubble=True, from_leaf=True - ) - - if not handled: - # error was NOT handled, throw the exception and end the turn. (This will trigger the - # Adapter.OnError handler and end the entire dialog stack) - raise - - # save all state scopes to their respective botState locations. - await dialog_state_manager.save_all_changes() # save BotState changes await bot_state_set.save_all_changes(dialog_context.context, False) @@ -204,7 +153,8 @@ async def on_turn(self, context: TurnContext) -> DialogManagerResult: @staticmethod async def send_state_snapshot_trace( - dialog_context: DialogContext, trace_label: str + dialog_context: DialogContext, + trace_label: str = None, # pylint: disable=unused-argument ): """ Helper to send a trace activity with a memory snapshot of the active dialog DC. @@ -212,17 +162,13 @@ async def send_state_snapshot_trace( :param trace_label: :return: """ - # send trace of memory - snapshot = DialogManager.get_active_dialog_context( + warn( + "This method will be deprecated as no longer is necesary", + PendingDeprecationWarning, + ) + await DialogExtensions._send_state_snapshot_trace( # pylint: disable=protected-access dialog_context - ).state.get_memory_snapshot() - trace_activity = Activity.create_trace_activity( - "BotState", - "https://www.botframework.com/schemas/botState", - snapshot, - trace_label, ) - await dialog_context.context.send_activity(trace_activity) @staticmethod def is_from_parent_to_skill(turn_context: TurnContext) -> bool: @@ -246,11 +192,13 @@ def get_active_dialog_context(dialog_context: DialogContext) -> DialogContext: :param dialog_context: :return: """ - child = dialog_context.child - if not child: - return dialog_context - - return DialogManager.get_active_dialog_context(child) + warn( + "This method will be deprecated as no longer is necesary", + PendingDeprecationWarning, + ) + return DialogExtensions._get_active_dialog_context( # pylint: disable=protected-access + dialog_context + ) @staticmethod def should_send_end_of_conversation_to_parent( @@ -294,6 +242,10 @@ def should_send_end_of_conversation_to_parent( async def handle_skill_on_turn( self, dialog_context: DialogContext ) -> DialogTurnResult: + warn( + "This method will be deprecated as no longer is necesary", + PendingDeprecationWarning, + ) # the bot is running as a skill. turn_context = dialog_context.context @@ -348,6 +300,10 @@ async def handle_skill_on_turn( async def handle_bot_on_turn( self, dialog_context: DialogContext ) -> DialogTurnResult: + warn( + "This method will be deprecated as no longer is necesary", + PendingDeprecationWarning, + ) # the bot is running as a root bot. if dialog_context.active_dialog is None: # start root dialog
port: Duplicate code across classic DialogExtensions.RunAsync and DialogManager The changes in [Johtaylo/dialogrunasync (#5294)](https://github.com/microsoft/botbuilder-dotnet/pull/5294) may need to be ported to maintain parity with `microsoft/botbuilder-dotnet`. <blockquote> Fixes #5293 This fix removes the duplicate code. It follows the suggestion made in the associated issue to share an _internal_ implementation. The main reason for this is because the DialogManager is expected to return a DialogTurnResult. RunAsync never returned this, because the very point of the function was to hide this, and the associated continue-begin dance, as it is meaningless to the caller. (Note when use directly in an IBot implementation the result is, naturally, ignored.) The other, even more obscure, reason is that the DialogManager exposes indirectly the DialogSet the DialogContext uses. As it is, it seems hard to predict whether an application could make use of a potential side effect of that leak in the abstraction. Making this change this way with the internal function doesn't impact that as the internal function takes an already created DialogContext. </blockquote> Please review and, if necessary, port the changes.
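For context, a hedged sketch of the classic extension-method pattern this refactor now shares code with (the bot class, property name, and state wiring are assumptions; `DialogExtensions.run_dialog` and its parameters are taken from the patch above, which routes both it and `DialogManager` through the same `_internal_run`):

```python
from botbuilder.core import ActivityHandler, ConversationState, TurnContext
from botbuilder.dialogs import Dialog, DialogExtensions


class DialogBot(ActivityHandler):
    def __init__(self, conversation_state: ConversationState, dialog: Dialog):
        self.conversation_state = conversation_state
        self.dialog = dialog

    async def on_turn(self, turn_context: TurnContext):
        await super().on_turn(turn_context)
        # Persist state changes after the turn, as DialogManager does internally.
        await self.conversation_state.save_changes(turn_context)

    async def on_message_activity(self, turn_context: TurnContext):
        # run_dialog hides the continue/begin dance and returns nothing to the
        # caller, which is why the shared internal implementation has to return
        # the DialogTurnResult for DialogManager instead.
        await DialogExtensions.run_dialog(
            self.dialog,
            turn_context,
            self.conversation_state.create_property("DialogState"),
        )
```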
2021-06-25T01:33:53
microsoft/botbuilder-python
1,778
microsoft__botbuilder-python-1778
[ "1777" ]
70fee41f4f122143fd27ac22dc17642642d915c3
diff --git a/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py b/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py --- a/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py +++ b/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py @@ -1,6 +1,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. +from typing import List from msrest.serialization import Model from botbuilder.schema import ( Attachment, @@ -1947,7 +1948,7 @@ def __init__( self, *, continuation_token: str = None, - members: [TeamsChannelAccount] = None, + members: List[TeamsChannelAccount] = None, **kwargs ) -> None: super(TeamsPagedMembersResult, self).__init__( @@ -1977,7 +1978,7 @@ class TeamsChannelData(Model): _attribute_map = { "channel": {"key": "channel", "type": "ChannelInfo"}, - "eventType": {"key": "eventType", "type": "str"}, + "event_type": {"key": "eventType", "type": "str"}, "team": {"key": "team", "type": "TeamInfo"}, "notification": {"key": "notification", "type": "NotificationInfo"}, "tenant": {"key": "tenant", "type": "TenantInfo"}, @@ -1988,7 +1989,7 @@ def __init__( self, *, channel=None, - eventType: str = None, + event_type: str = None, team=None, notification=None, tenant=None, @@ -1998,7 +1999,7 @@ def __init__( super(TeamsChannelData, self).__init__(**kwargs) self.channel = channel # doing camel case here since that's how the data comes in - self.event_type = eventType + self.event_type = event_type self.team = team self.notification = notification self.tenant = tenant
Typo in TeamsChannelData _attribute_map, eventType ## Version Bot Framework SDK v4 Python ## Describe the bug There is a typo in the _attribute_map of TeamsChannelData causing a validation error. Which in-turn doesn't allow messages to be send by creating a conversation using TeamsChannelData. Also shouldn't the eventType variable in the TeamsChannelData constructor be event_type. ## To Reproduce Steps to reproduce the behavior: Run the below python snippet ``` from botbuilder.schema.teams import TeamsChannelData teams = TeamsChannelData() teams.validate() ``` We will get an error ``` for attr_name, value in [(attr, getattr(self, attr)) for attr in self._attribute_map]: AttributeError: 'TeamsChannelData' object has no attribute 'eventType' ``` ## Expected behavior The code snippet should run without any error. ## Possible Fix Change the attribute map in TeamsChannelData (botbuilder-python/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py) from ``` _attribute_map = { "channel": {"key": "channel", "type": "ChannelInfo"}, "eventType": {"key": "eventType", "type": "str"}, "team": {"key": "team", "type": "TeamInfo"}, "notification": {"key": "notification", "type": "NotificationInfo"}, "tenant": {"key": "tenant", "type": "TenantInfo"}, "meeting": {"key": "meeting", "type": "TeamsMeetingInfo"}, } ``` to ``` _attribute_map = { "channel": {"key": "channel", "type": "ChannelInfo"}, "event_type": {"key": "eventType", "type": "str"}, "team": {"key": "team", "type": "TeamInfo"}, "notification": {"key": "notification", "type": "NotificationInfo"}, "tenant": {"key": "tenant", "type": "TenantInfo"}, "meeting": {"key": "meeting", "type": "TeamsMeetingInfo"}, } ``` More concisely change the key "eventType" to "event_type" in the _attribute_map dictionary
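After the fix, the repro snippet from the issue runs cleanly; the hedged example below (field values are illustrative) also shows that the constructor keyword is now snake_case `event_type` while the wire key stays `eventType`:

```python
from botbuilder.schema.teams import TeamsChannelData, TenantInfo

data = TeamsChannelData(
    event_type="teamMemberAdded",
    tenant=TenantInfo(id="<tenant-id>"),
)
data.validate()  # no longer raises AttributeError

# msrest still serializes the snake_case attribute to the "eventType" wire key.
assert data.serialize()["eventType"] == "teamMemberAdded"
```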
Thanks for filing this, @gokulav137. We will provide a fix in our next release.
2021-07-14T22:56:24
microsoft/botbuilder-python
1,798
microsoft__botbuilder-python-1798
[ "1797" ]
7b6f39f7fd85c0134984bb25b79558da4c6c5218
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/oauth_prompt.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/oauth_prompt.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/oauth_prompt.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/oauth_prompt.py @@ -429,21 +429,21 @@ async def _recognize_token( await context.send_activity( Activity( type="invokeResponse", - value=InvokeResponse(int(HTTPStatus.OK)), + value=InvokeResponse(status=HTTPStatus.OK), ) ) else: await context.send_activity( Activity( type="invokeResponse", - value=InvokeResponse(int(HTTPStatus.NOT_FOUND)), + value=InvokeResponse(status=HTTPStatus.NOT_FOUND), ) ) except Exception: await context.send_activity( Activity( type="invokeResponse", - value=InvokeResponse(int(HTTPStatus.INTERNAL_SERVER_ERROR)), + value=InvokeResponse(status=HTTPStatus.INTERNAL_SERVER_ERROR), ) ) elif self._is_token_exchange_request_invoke(context):
In bot builder dialog after successful login, login_step function is not getting called. ### Github issues for [Python](https://github.com/microsoft/BotBuilder-Samples/issues) /[Microsoft Bot Builder]](https://github.com/microsoft/BotBuilder-Samples/tree/main/samples/python/46.teams-auth)/ [Python] ## Sample information 1. Sample type: [Microsoft Bot Builder] 2. Sample language: [Python] 3. Sample name: <46.teams-auth> We are facing an issue after successful login to the team's bot. The issue is like in teams auth bot it's asking for the login through the sign-in button and on click of it, it opens the pop-up and gets the users details and gets successful login but gives the error "botbuilder/dialogs/prompts/oauth_prompt.py __init__() takes 1 positional argument but 2 were given" This login was working fine a few days back. But even if we are trying to run the official solution given on-site it's not calling to login_step function. and directly sending the error from python packages. ## To Reproduce Steps to reproduce the behavior: 1. download https://github.com/microsoft/BotBuilder-Samples/tree/main/samples/python/46.teams-auth this sample 2. change the config.py file and put the APP_ID, APP_PASSWORD and CONNECTION_NAME 3. create virtual env of any python version either 3.6, or 3.7 or 3.8 4. Install the requirenment.txt file 5. run the code "python app.py" 6. start the MS bot emulator of version 4.12 or 4.8 also tried 4.4 7. connect to ng rock in settings 8. click on the open bot and enter http://localhost:3978/api/messages 9. send some messages to get the sign-in button 10. Click on sign in and then after successful sign in it does not go to login_step in the local emulator and when we deploy to teams bot service it gives the above error. ## Expected behavior After login, it should go to login_step in the main_dialog.py file and execute further code. ## Screenshots ![image](https://user-images.githubusercontent.com/6695075/131828762-c5c8cb07-a867-49e8-aa81-68c3f25c7ebd.png) ## Additional context we have tried the different versions of python also different version of bot builder packages but still not working.
@maheshpardeshi This is a sample for MS Teams, but you are using it in the Emulator. Does it work in Teams? > @maheshpardeshi This is a sample for MS Teams, but you are using it in the Emulator. Does it work in Teams? Hi Dana, I deployed it to azure bot registration service and check in Teams app, but it did not work there also. I am getting below error in app service for teams app. and error is coming from oauth_prompt.py in that environment where we do not have any control. We have tried the different versions of bot builder. Python 3.8 is running in that service. ``` 2021-09-04T15:25:23.166832069Z: [ERROR] 2021-09-04T15:25:23.166836069Z: [ERROR] [on_turn_error] unhandled error: __init__() takes 1 positional argument but 2 were given 2021-09-04T15:25:23.166839569Z: [ERROR] Traceback (most recent call last): 2021-09-04T15:25:23.166842869Z: [ERROR] File "/tmp/8d96f0e1d99a2bf/antenv/lib/python3.8/site-packages/botbuilder/dialogs/prompts/oauth_prompt.py", line 432, in _recognize_token 2021-09-04T15:25:23.166846469Z: [ERROR] value=InvokeResponse(int(HTTPStatus.OK)), 2021-09-04T15:25:23.166849769Z: [ERROR] TypeError: __init__() takes 1 positional argument but 2 were given 2021-09-04T15:25:23.166853169Z: [ERROR] 2021-09-04T15:25:23.166856269Z: [ERROR] During handling of the above exception, another exception occurred: 2021-09-04T15:25:23.166859669Z: [ERROR] 2021-09-04T15:25:23.166862770Z: [ERROR] Traceback (most recent call last): 2021-09-04T15:25:23.166866170Z: [ERROR] File "/tmp/8d96f0e1d99a2bf/antenv/lib/python3.8/site-packages/botbuilder/core/bot_adapter.py", line 128, in run_pipeline 2021-09-04T15:25:23.166869770Z: [ERROR] return await self._middleware.receive_activity_with_status( 2021-09-04T15:25:23.166873270Z: [ERROR] File "/tmp/8d96f0e1d99a2bf/antenv/lib/python3.8/site-packages/botbuilder/core/middleware_set.py", line 69, in receive_activity_with_status 2021-09-04T15:25:23.166876970Z: [ERROR] return await self.receive_activity_internal(context, callback) 2021-09-04T15:25:23.166880270Z: [ERROR] File "/tmp/8d96f0e1d99a2bf/antenv/lib/python3.8/site-packages/botbuilder/core/middleware_set.py", line 79, in receive_activity_internal 2021-09-04T15:25:23.166883970Z: [ERROR] return await callback(context) ``` @maheshpardeshi I am able to reproduce and am investigating the root cause. Thank you for your patience. Hi Dana, Thanks for looking into the issue. I feel this issue specifically comes in the team's app only. in web emulator, it works but does not work in teams app. It will be a great help if you can show the direction from here. Hi Dana, Any update on this? @clearab, @tracyboehrer - We are also able to repro the issue raised by developer. Could you please look into it?
2021-09-16T15:45:56
microsoft/botbuilder-python
1,804
microsoft__botbuilder-python-1804
[ "1803" ]
b93c9fb9bb38161712a0af04b12e38f663c4d565
diff --git a/libraries/botbuilder-applicationinsights/setup.py b/libraries/botbuilder-applicationinsights/setup.py --- a/libraries/botbuilder-applicationinsights/setup.py +++ b/libraries/botbuilder-applicationinsights/setup.py @@ -12,7 +12,7 @@ ] TESTS_REQUIRES = [ "aiounittest==1.3.0", - "django==2.2.6", # For samples + "django==2.2.10", # For samples "djangorestframework==3.10.3", # For samples "flask==1.1.1", # For samples ]
Django Component Governance vulnerability Django 1.11 before 1.11.28, 2.2 before 2.2.10, and 3.0 before 3.0.3 allows SQL Injection if untrusted data is used as a StringAgg delimiter (e.g., in Django applications that offer downloads of data as a series of rows with a user-specified column delimiter). By passing a suitably crafted delimiter to a contrib.postgres.aggregates.StringAgg instance, it was possible to break escaping and inject malicious SQL. https://dev.azure.com/FuseLabs/SDK_v4/_componentGovernance/112465/alert/2370216?typeId=4354877
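The advisory concerns `StringAgg` receiving an attacker-controlled delimiter; the mitigation taken in this repository is simply pinning the samples to django==2.2.10 (see the patch above). For orientation only, a sketch of the vulnerable call shape, assuming a Django project with PostgreSQL and `django.contrib.postgres` installed; the queryset and field name are hypothetical and the function cannot run outside a configured Django project:

```python
# Illustrative sketch only - requires a configured Django project with PostgreSQL.
from django.contrib.postgres.aggregates import StringAgg


def export_names(queryset, delimiter: str):
    # On Django < 2.2.10, a user-supplied ``delimiter`` could break escaping and
    # inject SQL, which is the vector the advisory describes. On >= 2.2.10 the
    # delimiter is escaped correctly.
    return queryset.aggregate(names=StringAgg("name", delimiter=delimiter))
```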
2021-09-24T01:48:26
microsoft/botbuilder-python
1,845
microsoft__botbuilder-python-1845
[ "1798" ]
628cd4eaf9d578624c5a1f451ba4ff2def50238a
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/oauth_prompt.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/oauth_prompt.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/oauth_prompt.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/oauth_prompt.py @@ -429,21 +429,21 @@ async def _recognize_token( await context.send_activity( Activity( type="invokeResponse", - value=InvokeResponse(int(HTTPStatus.OK)), + value=InvokeResponse(status=HTTPStatus.OK), ) ) else: await context.send_activity( Activity( type="invokeResponse", - value=InvokeResponse(int(HTTPStatus.NOT_FOUND)), + value=InvokeResponse(status=HTTPStatus.NOT_FOUND), ) ) except Exception: await context.send_activity( Activity( type="invokeResponse", - value=InvokeResponse(int(HTTPStatus.INTERNAL_SERVER_ERROR)), + value=InvokeResponse(status=HTTPStatus.INTERNAL_SERVER_ERROR), ) ) elif self._is_token_exchange_request_invoke(context):
OAuthPrompt was constructing InvokeResponse incorrectly in some cases. Fixes #1797
2021-12-01T19:21:20
microsoft/botbuilder-python
1,888
microsoft__botbuilder-python-1888
[ "1880" ]
e90f08c5eb1fbafd86a046553b9d8c3ea05b7c93
diff --git a/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py b/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py --- a/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py +++ b/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py @@ -2,6 +2,8 @@ # Licensed under the MIT License. """Middleware Component for logging Activity messages.""" from typing import Awaitable, Callable, List, Dict +from jsonpickle import encode + from botbuilder.schema import Activity, ConversationReference, ActivityTypes from botbuilder.schema.teams import TeamsChannelData, TeamInfo from botframework.connector import Channels @@ -33,7 +35,7 @@ def telemetry_client(self) -> BotTelemetryClient: @property def log_personal_information(self) -> bool: - """ Gets a value indicating whether determines whether to log personal + """Gets a value indicating whether determines whether to log personal information that came from the user.""" return self._log_personal_information @@ -217,10 +219,10 @@ async def fill_send_event_properties( # Use the LogPersonalInformation flag to toggle logging PII data, text and user name are common examples if self.log_personal_information: - if activity.attachments and activity.attachments.strip(): - properties[ - TelemetryConstants.ATTACHMENTS_PROPERTY - ] = activity.attachments + if activity.attachments and len(activity.attachments) > 0: + properties[TelemetryConstants.ATTACHMENTS_PROPERTY] = encode( + activity.attachments + ) if activity.from_property.name and activity.from_property.name.strip(): properties[ TelemetryConstants.FROM_NAME_PROPERTY
An activity with attachments raises an exception in telemetry logger ## Version 4.14.1 ## Describe the bug Activity.attachments is a list. Therefore, the telemetry logger raises an exception (see below) when trying to call `strip()` on attachments. ``` File "/Users/ivo/Projects/alvao-chatbot/bot/venv/lib/python3.8/site-packages/botbuilder/core/telemetry_logger_middleware.py", line 220, in fill_send_event_properties if activity.attachments and activity.attachments.strip(): AttributeError: 'list' object has no attribute 'strip' ``` This line produces the bug: https://github.com/microsoft/botbuilder-python/blob/main/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py#L220 ## To Reproduce Steps to reproduce the behavior: 1. Use TelemetryLoggerMiddleware 2. Create a card message: `MessageFactory.attachment(Attachment(content_type="application/vnd.microsoft.card.adaptive", content=card))` 3. Send this activity from the bot to the user 4. When this activity is logged via TelemetryLoggerMiddleware, it raises an exception ## Expected behavior There should be a different check than `strip()`, possibly `len() > 0`. The `strip` function returns a string without leading and trailing whitespace, so it makes no sense to call it a) in a condition, b) on a list.
@ivopisarovic - investigating this issue. Are you able to provide code snippets or sample code? Sure, I have prepared a demo. You can set your `AppInsightsInstrumentationKey` in `config.py`, start it and send any message to the bot via Emulator. The error occurs when the bot tries to reply. https://github.com/ivopisarovic/botbuilder-telemetry-problem The demo is based on the official [tutorial](https://docs.microsoft.com/en-us/azure/bot-service/bot-service-quickstart-create-bot?view=azure-bot-service-4.0&tabs=python%2Cvs). It uses the echo bot sample. Besides that, the telemetry was initialised and an attachment message is used instead of the default text message. See all the [changes](https://github.com/ivopisarovic/botbuilder-telemetry-problem/commit/d74867c57efd1260666189d772282cdad5944293).
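For reference, the corrected guard in the patch above replaces the string-oriented `strip()` test with a length check and JSON-encodes the attachment list before attaching it to the telemetry properties. A standalone sketch of that logic; the function name and the plain "attachments" key are illustrative (the SDK uses `TelemetryConstants.ATTACHMENTS_PROPERTY`):

```python
from jsonpickle import encode


def fill_attachments_property(activity, properties: dict, log_personal_information: bool) -> None:
    # activity.attachments is a list, so the old ``.strip()`` call could never
    # succeed; a length check plus jsonpickle encoding is what the fix uses.
    if log_personal_information and activity.attachments and len(activity.attachments) > 0:
        properties["attachments"] = encode(activity.attachments)
```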
2022-01-13T21:14:12
microsoft/botbuilder-python
1,889
microsoft__botbuilder-python-1889
[ "1872" ]
16f55f8cbf4a21b63700e36af4a19de1e2099886
diff --git a/libraries/botbuilder-core/botbuilder/core/serializer_helper.py b/libraries/botbuilder-core/botbuilder/core/serializer_helper.py --- a/libraries/botbuilder-core/botbuilder/core/serializer_helper.py +++ b/libraries/botbuilder-core/botbuilder/core/serializer_helper.py @@ -1,6 +1,6 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. - +from copy import copy from inspect import getmembers from typing import Type from enum import Enum @@ -25,6 +25,9 @@ def deserializer_helper(msrest_cls: Type[Model], dict_to_deserialize: dict) -> Model: deserializer = Deserializer(DEPENDICIES_DICT) + _clean_data_for_serialization( + deserializer.dependencies[msrest_cls.__name__], dict_to_deserialize + ) return deserializer(msrest_cls.__name__, dict_to_deserialize) @@ -35,3 +38,21 @@ def serializer_helper(object_to_serialize: Model) -> dict: serializer = Serializer(DEPENDICIES_DICT) # pylint: disable=protected-access return serializer._serialize(object_to_serialize) + + +def _clean_data_for_serialization(msrest_cls: Type[Model], dict_to_deserialize: dict): + # pylint: disable=protected-access + # Clean channel response of empty strings for expected objects. + if not isinstance(dict_to_deserialize, dict): + return + serialization_model = copy(msrest_cls._attribute_map) + for key, value in msrest_cls._attribute_map.items(): + if key != value["key"]: + serialization_model[value["key"]] = value + for prop, prop_value in dict_to_deserialize.items(): + if ( + prop in serialization_model + and serialization_model[prop]["type"] in DEPENDICIES_DICT + and not prop_value + ): + dict_to_deserialize[prop] = None
diff --git a/libraries/botbuilder-core/tests/teams/test_teams_activity_handler.py b/libraries/botbuilder-core/tests/teams/test_teams_activity_handler.py --- a/libraries/botbuilder-core/tests/teams/test_teams_activity_handler.py +++ b/libraries/botbuilder-core/tests/teams/test_teams_activity_handler.py @@ -1003,6 +1003,25 @@ async def test_on_teams_task_module_fetch(self): assert bot.record[0] == "on_invoke_activity" assert bot.record[1] == "on_teams_task_module_fetch" + async def test_on_teams_task_module_fetch_none_as_empty(self): + # Arrange + activity = Activity( + type=ActivityTypes.invoke, + name="task/fetch", + value={"data": {"key": "value"}, "context": "",}, + ) + + turn_context = TurnContext(SimpleAdapter(), activity) + + # Act + bot = TestingTeamsActivityHandler() + await bot.on_turn(turn_context) + + # Assert + assert len(bot.record) == 2 + assert bot.record[0] == "on_invoke_activity" + assert bot.record[1] == "on_teams_task_module_fetch" + async def test_on_teams_task_module_submit(self): # Arrange activity = Activity(
Teams Task Module - Deserialization Error on Teams mobile app for iOS ## Version botbuilder-integration-aiohttp 4.14.0 Python 3.8.6 ## Describe the bug Error when loading Task Module on iOS iOS 14.8.1 / MS Teams v3.20.0 ## To Reproduce 1. Deploy [sample bot 54.teams-task-module](https://github.com/microsoft/BotBuilder-Samples/tree/main/samples/python/54.teams-task-module) 2. Say hello and click on _Adaptive Card_ button 3. Deserialization Error when on iOS iOS 14.8.1 / Microsoft Teams v3.20.0 ![image](https://user-images.githubusercontent.com/4013036/146412591-61399a75-d3d3-4eb6-a0ec-36ffa3cac54c.png) ## Traceback _(file locations prefix intentionally removed)_ ``` File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1293, in _deserialize found_value = key_extractor(attr, attr_desc, data) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1064, in rest_key_extractor return working_data.get(key) AttributeError: 'str' object has no attribute 'get' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "test_teams_task/env/lib/site-packages/botbuilder/core/bot_adapter.py", line 129, in run_pipeline context, callback File "test_teams_task/env/lib/site-packages/botbuilder/core/middleware_set.py", line 69, in receive_activity_with_status return await self.receive_activity_internal(context, callback) File "test_teams_task/env/lib/site-packages/botbuilder/core/middleware_set.py", line 79, in receive_activity_internal return await callback(context) File "test_teams_task/env/lib/site-packages/botbuilder/core/activity_handler.py", line 78, in on_turn invoke_response = await self.on_invoke_activity(turn_context) File "test_teams_task/env/lib/site-packages/botbuilder/core/teams/teams_activity_handler.py", line 155, in on_invoke_activity TaskModuleRequest, turn_context.activity.value File "test_teams_task/env/lib/site-packages/botbuilder/core/serializer_helper.py", line 28, in deserializer_helper return deserializer(msrest_cls.__name__, dict_to_deserialize) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1233, in __call__ return self._deserialize(target_obj, data) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1299, in _deserialize value = self.deserialize_data(raw_value, attr_desc['type']) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1468, in deserialize_data return self._deserialize(obj_type, data) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1303, in _deserialize raise_with_traceback(DeserializationError, msg, err) File "test_teams_task/env/lib/site-packages/msrest/exceptions.py", line 51, in raise_with_traceback raise error.with_traceback(exc_traceback) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1293, in _deserialize found_value = key_extractor(attr, attr_desc, data) File "test_teams_task/env/lib/site-packages/msrest/serialization.py", line 1064, in rest_key_extractor return working_data.get(key) msrest.exceptions.DeserializationError: Unable to deserialize to object: type, AttributeError: 'str' object has no attribute 'get' ``` ## Expected behavior This sample bot raises no error when interacting on the following platforms: - Windows 10 (Desktop app, Firefox, Chrome) - macOS (Chrome) - Android (Mobile app) ![image](https://user-images.githubusercontent.com/4013036/146413680-7bc42c4d-9876-4d18-9a61-7b94b4a5cccb.png) It was possible to interact with Task 
Module on iOS iOS 14.8.1 / Microsoft Teams v3.20.0 when deploying these samples (python not included): https://docs.microsoft.com/en-us/samples/officedev/microsoft-teams-samples/ms-teams-task-sample/ ## Additional context Initially the error was detected on a bot in production currently deployed in Azure. Since the error message is the same when running [bot sample 54.teams-task-module](https://github.com/microsoft/BotBuilder-Samples/tree/main/samples/python/54.teams-task-module), for the sake of repro we can take this example.
This issue could take a couple of days to reproduce. Actively working on it; I will post an update when available. I was able to reproduce it; this is due to a change in the Teams client for iOS. Will try to provide a fix soon. Thanks. @axelsrz Any updates on this issue?
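The test added with the fix (see the test patch above) shows the payload shape the iOS client started sending: `value` arrives as `{"data": {...}, "context": ""}`, i.e. `context` is an empty string where msrest expects an object, which is why deserialization calls `.get()` on a `str` and fails. The shipped fix inspects the msrest attribute map in `serializer_helper.py` and nulls out such empty strings before deserializing; a rough, self-contained sketch of the idea with the model-typed keys hard-coded:

```python
# Rough sketch only; the real fix derives the model-typed keys from the
# msrest _attribute_map instead of hard-coding them.
def clean_for_deserialization(payload: dict, model_typed_keys=("context",)) -> dict:
    # Replace empty strings with None for properties that should be objects,
    # so the deserializer does not call .get() on a str.
    for key in model_typed_keys:
        if key in payload and payload[key] == "":
            payload[key] = None
    return payload


print(clean_for_deserialization({"data": {"key": "value"}, "context": ""}))
# {'data': {'key': 'value'}, 'context': None}
```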
2022-01-13T22:41:31
microsoft/botbuilder-python
1,907
microsoft__botbuilder-python-1907
[ "1882" ]
bafc08de010a3da41d4921250ef1fb908e08f9cc
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/prompts/prompt_culture_models.py @@ -174,6 +174,7 @@ def get_supported_cultures(cls) -> List[PromptCultureModel]: """ return [ cls.Chinese, + cls.German, cls.Dutch, cls.English, cls.French,
German language is not applied correctly when using ConfirmPrompt ### The Issue I am building a chatbot for German users. I am sending the locale "de-de" as the user, and can confirm it actually reaches the bot. When I use a ConfirmPrompt, the bot returns Yes and No instead of "Ja" and "Nein". ### The Solution After a lot of digging, I found the underlying cause and a fix. The culture model does not actually recognize German (de-de) as a supported language, and thus switches to the default (English). However, German does exist in prompt_culture_models.py, and there is a todo "# TODO: Replace with Culture.German after Recognizers-Text package updates." I looked it up, and the Recognizers-Text package is already updated :) . Still, this is not the real issue. The reason is that German is not listed in the supported cultures function. I simply added it and everything works fine. ` @classmethod def get_supported_cultures(cls) -> List[PromptCultureModel]: """ Gets a list of the supported culture models. """ return [ cls.Chinese, cls.German, cls.Dutch, cls.English, cls.French, cls.Italian, cls.Japanese, cls.Korean, cls.Portuguese, cls.Spanish, cls.Turkish, ]`
Hi @PascalHessler -- thanks, looking into this issue.
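Once German is in the supported cultures list (the one-line patch above), a de-de conversation should get localized confirm choices without any extra wiring. A small usage sketch, assuming the patched SDK; the dialog id, the prompt text, and the use of `default_locale` as a fallback are illustrative:

```python
from botbuilder.core import MessageFactory
from botbuilder.dialogs import WaterfallStepContext
from botbuilder.dialogs.prompts import ConfirmPrompt, PromptOptions

# Registered once in the dialog set (e.g. in the main dialog's __init__);
# default_locale only applies when the incoming activity carries no locale.
confirm_prompt = ConfirmPrompt("ConfirmPrompt", default_locale="de-de")


async def ask_step(step: WaterfallStepContext):
    # With activity.locale == "de-de" (or the fallback above), the suggested
    # actions should now render as "Ja" / "Nein" instead of Yes / No.
    return await step.prompt(
        "ConfirmPrompt",
        PromptOptions(prompt=MessageFactory.text("Möchten Sie fortfahren?")),
    )
```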
2022-01-28T21:45:46
microsoft/botbuilder-python
1,919
microsoft__botbuilder-python-1919
[ "1908" ]
edbe3b164c4ff7866b945b43dc24f7efd9d6c02e
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/dialog_state_manager.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/dialog_state_manager.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/dialog_state_manager.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/dialog_state_manager.py @@ -290,7 +290,10 @@ def try_get_value( # TODO: HACK to support .First() retrieval on turn.recognized.entities.foo, replace with Expressions once # expressions ship first = ".FIRST()" - i_first = path.upper().rindex(first) + try: + i_first = path.upper().rindex(first) + except ValueError: + i_first = -1 if i_first >= 0: remaining_path = path[i_first + len(first) :] path = path[0:i_first]
Bug in set_value of StateManager ## Version Latest ## Describe the bug When I try to set the state value via the state manager I get the following error: ``` File "/home/robin/dev/ask-waldo/ask-waldo-teams-chatbot/.venv/lib/python3.8/site-packages/botbuilder/dialogs/memory/dialog_state_manager.py", line 293, in try_get_value i_first = path.upper().rindex(first) ValueError: substring not found ``` ## To Reproduce Steps to reproduce the behavior: Call set_value on state_manager: ` step_context.state.set_value("user.path", "a value")` ## Expected behavior I do not receive an error and the state is updated. ## Additional context This can be fixed as follows: In file: `botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/dialog_state_manager.py` Update: ` i_first = path.upper().rindex(first)` To: ``` try: i_first = path.upper().rindex(first) except ValueError: i_first = -1 ``` Or just fix that hacky thing that is being done there :)
Hi @RobinVds, I'm looking into this issue.
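The patch above wraps the bare `rindex` call, which raises `ValueError` whenever the path contains no `.FIRST()` segment (i.e. for any ordinary path such as "user.path"), in a try/except. The same pattern in isolation, runnable on its own:

```python
def find_first_segment(path: str) -> int:
    """Return the index of the last '.FIRST()' in path, or -1 if it is absent."""
    first = ".FIRST()"
    try:
        return path.upper().rindex(first)
    except ValueError:
        # str.rindex raises instead of returning -1, which is what made plain
        # paths blow up inside try_get_value before the fix.
        return -1


print(find_first_segment("user.path"))  # -1, so set_value/try_get_value proceed normally
print(find_first_segment("turn.recognized.entities.foo.First()"))  # position of ".First()"
```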
2022-02-04T00:52:13
microsoft/botbuilder-python
1,930
microsoft__botbuilder-python-1930
[ "1929" ]
b11e029cb6b0d97273b046ef1c05c067c029b3ff
diff --git a/libraries/botbuilder-schema/setup.py b/libraries/botbuilder-schema/setup.py --- a/libraries/botbuilder-schema/setup.py +++ b/libraries/botbuilder-schema/setup.py @@ -6,7 +6,7 @@ NAME = "botbuilder-schema" VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.15.0" -REQUIRES = ["msrest==0.6.10"] +REQUIRES = ["msrest==0.6.19"] root = os.path.abspath(os.path.dirname(__file__)) diff --git a/libraries/botframework-connector/setup.py b/libraries/botframework-connector/setup.py --- a/libraries/botframework-connector/setup.py +++ b/libraries/botframework-connector/setup.py @@ -7,7 +7,7 @@ NAME = "botframework-connector" VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.15.0" REQUIRES = [ - "msrest==0.6.10", + "msrest==0.6.19", "requests>=2.23.0,<2.26", "PyJWT>=1.5.3,<2.0.0", "botbuilder-schema==4.15.0",
Bump msrest to 0.6.19 or higher Is your feature request related to a problem? Please describe. An old version of msrest is used in the botframework components -> https://github.com/microsoft/botbuilder-python/search?q=msrest%3D%3D0.6.10 . This blocks us from using the latest versions of the Service Bus client, or even the new Language Studio Python libraries. With msrest==0.6.10, we are stuck on the 0.50 Service Bus package and on similarly old versions of other packages like Event Grid. Describe the solution you'd like EDITED: Upgrade msrest to at least 0.6.19. Describe alternatives you've considered No alternatives.
2022-03-03T23:32:05
microsoft/botbuilder-python
1,932
microsoft__botbuilder-python-1932
[ "1788" ]
011408845894b68d57d11f478e55024ce4035bc3
diff --git a/libraries/botframework-connector/setup.py b/libraries/botframework-connector/setup.py --- a/libraries/botframework-connector/setup.py +++ b/libraries/botframework-connector/setup.py @@ -11,7 +11,7 @@ "requests>=2.23.0,<2.26", "PyJWT>=1.5.3,<2.0.0", "botbuilder-schema==4.15.0", - "msal==1.6.0", + "msal==1.17.0", ] root = os.path.abspath(os.path.dirname(__file__))
Bump MSAL to the latest version **Is your feature request related to a problem? Please describe.** An old version of MSAL (v1.6.0) is used in [botframework-connector](https://github.com/microsoft/botbuilder-python/blob/main/libraries/botframework-connector/requirements.txt#L6). **Describe the solution you'd like** Upgrade to the [latest version](https://github.com/AzureAD/microsoft-authentication-library-for-python/releases) (v1.13.0 is the latest at this moment). **Describe alternatives you've considered** No alternatives. **Additional context** Please also consider not pinning this dependency (#1467).
I want to add that not only msal is pinned to an old version; requests is as well.
2022-03-11T12:30:53
microsoft/botbuilder-python
1,957
microsoft__botbuilder-python-1957
[ "1935" ]
31d0ffe2c079afe4c437e0de12dfbc2ef8c08964
diff --git a/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py b/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py --- a/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py +++ b/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py @@ -303,10 +303,12 @@ def __populate_additional_channel_properties( properties: dict, ): if activity.channel_id == Channels.ms_teams: - teams_channel_data: TeamsChannelData = activity.channel_data + teams_channel_data: TeamsChannelData = TeamsChannelData().deserialize( + activity.channel_data + ) properties["TeamsTenantId"] = ( - teams_channel_data.tenant + teams_channel_data.tenant.id if teams_channel_data and teams_channel_data.tenant else "" ) diff --git a/libraries/botbuilder-dialogs/setup.py b/libraries/botbuilder-dialogs/setup.py --- a/libraries/botbuilder-dialogs/setup.py +++ b/libraries/botbuilder-dialogs/setup.py @@ -6,6 +6,7 @@ REQUIRES = [ "regex<=2019.08.19", + "emoji==1.7.0", "recognizers-text-date-time>=1.0.2a1", "recognizers-text-number-with-unit>=1.0.2a1", "recognizers-text-number>=1.0.2a1",
diff --git a/libraries/botbuilder-core/tests/test_telemetry_middleware.py b/libraries/botbuilder-core/tests/test_telemetry_middleware.py --- a/libraries/botbuilder-core/tests/test_telemetry_middleware.py +++ b/libraries/botbuilder-core/tests/test_telemetry_middleware.py @@ -251,7 +251,7 @@ async def test_log_teams(self): channel_data = TeamsChannelData( team=team_info, tenant=TenantInfo(id="tenantId"), - ) + ).serialize() activity = MessageFactory.text("test") activity.channel_data = channel_data @@ -272,7 +272,7 @@ async def test_log_teams(self): "fromId": "userId", "recipientId": "bot", "recipientName": "Bot", - "TeamsTenantId": TenantInfo(id="tenantId"), + "TeamsTenantId": "tenantId", "TeamsUserAadObjectId": "aaId", "TeamsTeamInfo": TeamInfo.serialize(team_info), },
Incorrect channel_data data type in TelemetryLoggerMiddleware ## Version 4.14 ## Describe the bug Channel data from Teams are incorrectly used in Telemetry Middleware. Bot is not able to respond to any message sent via Teams when the Telemetry is enabled. The same bot works fine when tested in Bot Framework Emulator. ## To Reproduce Steps to reproduce the behavior: 1. Add Telemetry to your Python Bot: ``` TELEMETRY_CLIENT = ApplicationInsightsTelemetryClient( CONFIG.APPINSIGHTS_INSTRUMENTATION_KEY, telemetry_processor=AiohttpTelemetryProcessor(), client_queue_size=1 ) TELEMETRY_LOGGER_MIDDLEWARE = TelemetryLoggerMiddleware(TELEMETRY_CLIENT, True) ADAPTER.use(TELEMETRY_LOGGER_MIDDLEWARE) ... APP = web.Application(middlewares=[bot_telemetry_middleware, aiohttp_error_middleware]) ``` 2. Deploy to Azure and connect to Teams 3. Send a message from Teams to the bot. 4. See error: ``` 2022-04-21T10:55:29.178590260Z TelemetryLoggerMiddleware.__populate_additional_channel_properties( 2022-04-21T10:55:29.178594560Z File "/usr/local/lib/python3.8/site-packages/botbuilder/core/telemetry_logger_middleware.py", line 307, in __populate_additional_channel_properties 2022-04-21T10:55:29.178599160Z if teams_channel_data and teams_channel_data.tenant 2022-04-21T10:55:29.178603361Z AttributeError: 'dict' object has no attribute 'tenant' ``` ## Additional context I have found out that `activity.channel_data` is a dict, not an object. Therefore, I changed the following method in `telemetry_logger_middleware.py` to use the dict instead of the object and now it works fine. ``` @staticmethod def __populate_additional_channel_properties( activity: Activity, properties: dict, ): if activity.channel_id == Channels.ms_teams: teams_channel_data: TeamsChannelData = activity.channel_data properties["TeamsTenantId"] = ( teams_channel_data['tenant']['id'] if teams_channel_data and teams_channel_data.get("tenant", {}).get("id", None) else "" ) properties["TeamsUserAadObjectId"] = ( activity.from_property.aad_object_id if activity.from_property else "" ) if teams_channel_data and teams_channel_data.get("team", None): properties["TeamsTeamInfo"] = TeamInfo.serialize( teams_channel_data['team'] ) ```
Hi @ivopisarovic, Thanks for your report. I am investigating. Hi @ivopisarovic, I took the Python core bot sample with telemetry and connected to it through Teams, and I am unable to reproduce this error. A few clarification questions: - You are unable to get a response back to *any* message sent to the bot? - Do you get welcome messages? - Does the bot work without telemetry? - What type of bot is this? (Single Tenant, Multi, Managed Identity) - Did you upload a Teams manifest, or did you connect via the "Open in Teams" link in the channel menu? Thanks for looking into the problem. See my answers below. I will try to do further testing and I will try to prepare a demo, so you can test my code. > You are unable to get a response back to any message sent to the bot? Exactly, no response. > Do you get welcome messages? No, even the welcome message sends some logs to Insights. Therefore, even this message fails. > Does the bot work without telemetry? Good question, I have not tested it. But after the change I mentioned earlier it works. > What type of bot is this? (Single Tenant, Multi, Managed Identity) It is a multi tenant bot. > Did you upload a Teams manifest, or did you connect via the "Open in Teams" link in the channel menu? I used the Open in Teams link. > I used the Open in Teams link. One thing I'd always recommend is using an app manifest. Sometimes bots don't work properly if you use `Open in Teams`, so app manifests are always your best bet. Adding an app manifest is effectively installing an app, which is what you would be doing if you deployed your bot as a production Teams app. If you haven't used a manifest before, you should be able to make use of one from another sample, such as [this one](https://github.com/microsoft/BotBuilder-Samples/tree/main/samples/python/57.teams-conversation-bot/teams_app_manifest). Simply fill in the marked fields, zip all three files into one archive named `manifest`, and upload this as a custom app to Teams. Closing this issue due to inactivity. If you still require assistance, please comment and we can reopen the issue.
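The shipped fix (the telemetry_logger_middleware.py patch above) takes a slightly different route from the workaround quoted in the issue: it deserializes the raw dict into the `TeamsChannelData` model and then reads `tenant.id` from the typed object. A standalone sketch of that approach; the payload values are illustrative:

```python
from botbuilder.schema.teams import TeamsChannelData

raw_channel_data = {"tenant": {"id": "your-tenant-guid"}, "team": {"id": "19:team-thread-id"}}

# Mirrors the fix: build the typed model first, then read the tenant id from it.
teams_channel_data = TeamsChannelData().deserialize(raw_channel_data)
tenant_id = (
    teams_channel_data.tenant.id
    if teams_channel_data and teams_channel_data.tenant
    else ""
)
print(tenant_id)  # your-tenant-guid
```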
2022-07-29T14:44:46
microsoft/botbuilder-python
1,970
microsoft__botbuilder-python-1970
[ "1933" ]
58a036e7f7c2c6afd55c050d76563df57a082c34
diff --git a/libraries/botbuilder-schema/setup.py b/libraries/botbuilder-schema/setup.py --- a/libraries/botbuilder-schema/setup.py +++ b/libraries/botbuilder-schema/setup.py @@ -6,7 +6,7 @@ NAME = "botbuilder-schema" VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.15.0" -REQUIRES = ["msrest==0.6.19"] +REQUIRES = ["msrest==0.6.*"] root = os.path.abspath(os.path.dirname(__file__)) diff --git a/libraries/botframework-connector/setup.py b/libraries/botframework-connector/setup.py --- a/libraries/botframework-connector/setup.py +++ b/libraries/botframework-connector/setup.py @@ -7,11 +7,11 @@ NAME = "botframework-connector" VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.15.0" REQUIRES = [ - "msrest==0.6.19", + "msrest==0.6.*", "requests>=2.23.0,<2.26", "PyJWT>=1.5.3,<2.0.0", "botbuilder-schema==4.15.0", - "msal==1.17.0", + "msal==1.*", ] root = os.path.abspath(os.path.dirname(__file__))
Version dependency between Botbuilder and Question Answering ### [Github issues](https://github.com/Microsoft/botbuilder-python) should be used for bugs and feature requests. Use [Stack Overflow](https://stackoverflow.com/questions/tagged/botframework) for general "how-to" questions. ## Version Botbuilder 4.14.0 azure-ai-language-questionanswering 1.0.0 ## Describe the bug When building a Python bot using Question Answering and Botbuilder: The package azure-ai-language-questionanswering actual version 1.0.0 requires at least msrest version 0.6.21 but the actual version of Botbuilder (-schema/core/ai/etc) 4.14.2 requires msrest to be 0.6.19 (though I'm using 4.14.0 and the version doesn't match either). There's no way these two packages work together right now because of the msrest version dependency. ## To Reproduce 1. Create a requirements.txt file with these dependencies azure-ai-language-questionanswering==1.0.0 botbuilder-ai==4.14.0 botbuilder-core==4.14.0 botbuilder-dialogs==4.14.0 botbuilder-integration-aiohttp==4.14.0 botbuilder-schema==4.14.0 2. Install dependencies of a bot project with pip install -r requirements.txt ## Expected behavior Upgrade Botbuilder to work with msrest version 0.6.21 OR Decrease Question Answering version dependency of msrest to 0.6.19 or lower
@axelsrz Does this issue need a fix to botbuilder-python dependency on msrest package? Thanks! Hi @tracyboehrer @axelsrz..do you suggest any workaround until this is fixed ? I've a bot that uses question answering and I'm not able to deploy to azure as it fails whiles trying to resolve the dependencies. If its a simple update of the requirements, do you suggest submitting a PR ?
2022-10-25T23:29:04
microsoft/botbuilder-python
2,037
microsoft__botbuilder-python-2037
[ "2036" ]
8e714268cd288ef646873291f6dcc4e265ab95eb
diff --git a/libraries/botbuilder-core/botbuilder/core/cloud_adapter_base.py b/libraries/botbuilder-core/botbuilder/core/cloud_adapter_base.py --- a/libraries/botbuilder-core/botbuilder/core/cloud_adapter_base.py +++ b/libraries/botbuilder-core/botbuilder/core/cloud_adapter_base.py @@ -150,6 +150,7 @@ async def continue_conversation( # pylint: disable=arguments-differ self, reference: ConversationReference, callback: Callable, + bot_app_id: str, ): """ Sends a proactive message to a conversation. @@ -161,9 +162,12 @@ async def continue_conversation( # pylint: disable=arguments-differ :type reference: :class:`botbuilder.schema.ConversationReference` :param callback: The method to call for the resulting bot turn. :type callback: :class:`typing.Callable` + :param bot_app_id: The application Id of the bot. This is the appId returned by the Azure portal registration, + and is generally found in the `MicrosoftAppId` parameter in `config.py`. + :type bot_app_id: :class:`typing.str` """ return await self.process_proactive( - self.create_claims_identity(), + self.create_claims_identity(bot_app_id), get_continuation_activity(reference), None, callback, @@ -182,7 +186,7 @@ async def continue_conversation_with_claims( async def create_conversation( # pylint: disable=arguments-differ self, - bot_app_id: ConversationReference, + bot_app_id: str, callback: Callable[[TurnContext], Awaitable] = None, conversation_parameters: ConversationParameters = None, channel_id: str = None,
CloudAdapter.continue_conversation() is missing `bot_app_id` parameter ## Version 4.14.6 ## Describe the bug Posting a message in a group chat throws an exception ## To Reproduce ```python adapter = CloudAdapter(ConfigurationBotFrameworkAuthentication(config)) conversation_reference = ConversationReference( service_url=config.BASE_URL, channel_id="msteams", conversation=ConversationAccount( id=${CHAT_ID}, is_group = True, ), ) await adapter.continue_conversation( reference=conversation_reference, callback=lambda turn_context: turn_context.send_activity("Hello!"), ) ``` Exception: ```bash Traceback (most recent call last): *** removed for brevity **** result = await adapter.continue_conversation( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/konstantinplis/.pyenv/versions/3.11.0/lib/python3.11/site-packages/botbuilder/core/cloud_adapter_base.py", line 165, in continue_conversation return await self.process_proactive( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/konstantinplis/.pyenv/versions/3.11.0/lib/python3.11/site-packages/botbuilder/core/cloud_adapter_base.py", line 262, in process_proactive connector_client = await connector_factory.create( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/konstantinplis/.pyenv/versions/3.11.0/lib/python3.11/site-packages/botframework/connector/auth/_connector_factory_impl.py", line 37, in create credentials = await self._credential_factory.create_credentials( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/konstantinplis/.pyenv/versions/3.11.0/lib/python3.11/site-packages/botframework/connector/auth/password_service_client_credential_factory.py", line 35, in create_credentials raise Exception("Invalid app_id") Exception: Invalid app_id ``` ## Expected behavior Message sent ## Additional context It seems that `bot_app_id` must be passed to `CloudAdapter.continue_conversation()` and subsequently to `create_claims_identity(bot_app_id)`
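With the signature change in the patch above, the proactive call now has to supply the bot's app id so `create_claims_identity` can build a valid identity instead of hitting "Invalid app_id". A sketch of the corrected call, reusing the shapes from the report; `config`, `CHAT_ID`, and the attribute names on `config` are placeholders from the report, not SDK requirements:

```python
from botbuilder.core import TurnContext
from botbuilder.integration.aiohttp import (
    CloudAdapter,
    ConfigurationBotFrameworkAuthentication,
)
from botbuilder.schema import ConversationAccount, ConversationReference

adapter = CloudAdapter(ConfigurationBotFrameworkAuthentication(config))

conversation_reference = ConversationReference(
    service_url=config.BASE_URL,
    channel_id="msteams",
    conversation=ConversationAccount(id=CHAT_ID, is_group=True),
)


async def send_hello(turn_context: TurnContext):
    await turn_context.send_activity("Hello!")


async def notify():
    # The third argument is new: the MicrosoftAppId from config, so the adapter
    # can create a claims identity and valid credentials for the proactive turn.
    await adapter.continue_conversation(conversation_reference, send_hello, config.APP_ID)
```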
2023-11-09T06:23:55
microsoft/botbuilder-python
2,050
microsoft__botbuilder-python-2050
[ "2019" ]
c5c276de97168a0b68b439023b2b8cdfee23d520
diff --git a/libraries/botbuilder-dialogs/setup.py b/libraries/botbuilder-dialogs/setup.py --- a/libraries/botbuilder-dialogs/setup.py +++ b/libraries/botbuilder-dialogs/setup.py @@ -5,7 +5,7 @@ from setuptools import setup REQUIRES = [ - "regex<=2019.08.19", + "regex>=2022.1.18", "emoji==1.7.0", "recognizers-text-date-time>=1.0.2a1", "recognizers-text-number-with-unit>=1.0.2a1",
botbuilder support for regex==2022 and above Description: I'm currently building a chatbot using the Azure Bot Builder SDK in conjunction with OpenAI. In my project, I'm relying on the OpenAIEmbedding class from the langchain package, which uses Tiktoken. However, I've run into an issue due to dependency conflicts with Tiktoken. Specifically, Tiktoken requires regex version 2022 or higher, while the Bot Builder packages support only up to regex version 2019. Feature Request: I kindly request relaxing the regex pin in the botbuilder packages so that they are compatible with Tiktoken's requirement of regex version 2022 or higher. This update would resolve the dependency conflicts and enable smoother integration of OpenAI into projects using the Azure Bot Builder SDK. Additional Information: Current Behavior: Currently, the OpenAIEmbedding class in langchain relies on Tiktoken, which requires a regex version that is not compatible with the Bot Builder SDK's regex pin. Desired Behavior: The botbuilder packages should be updated to support Tiktoken's dependency on regex version 2022 or higher. Impact of the Feature: This feature would benefit developers working on chatbot projects that use the Azure Bot Builder SDK and OpenAI. It would eliminate dependency conflicts, allowing for a seamless integration experience.
Thanx for assigning this ramfattah, this feature has a great impact on Microsoft ecosystem since azure open ai and botbuilder can easily work together Thanks @mohdhammad786, I'll discuss this internally with the Bot Framework SDK engineers about this issue. We will report back soon. Thanks. > Thanks @mohdhammad786, I'll discuss this internally with the Bot Framework SDK engineers about this issue. > > We will report back soon. Thanks. Thanx ramfattah for such quick action on this Hello @mohdhammad786, Thank you for your request. Our Bot Framework Python SDK will soon end support in November 2023. Because of this, we are focusing on security fixes. We will add your request to our backlog, but we don't have an ETA at the moment. Thank you. CC @tracyboehrer
2023-12-05T01:18:55
microsoft/botbuilder-python
2,051
microsoft__botbuilder-python-2051
[ "2014" ]
3de254694d05131d830c7e15a5bea6add55a6afa
diff --git a/libraries/botbuilder-schema/setup.py b/libraries/botbuilder-schema/setup.py --- a/libraries/botbuilder-schema/setup.py +++ b/libraries/botbuilder-schema/setup.py @@ -6,7 +6,7 @@ NAME = "botbuilder-schema" VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.15.0" -REQUIRES = ["msrest==0.6.*", "urllib3<2.0.0"] +REQUIRES = ["msrest== 0.7.*", "urllib3<2.0.0"] root = os.path.abspath(os.path.dirname(__file__)) diff --git a/libraries/botframework-connector/setup.py b/libraries/botframework-connector/setup.py --- a/libraries/botframework-connector/setup.py +++ b/libraries/botframework-connector/setup.py @@ -7,7 +7,7 @@ NAME = "botframework-connector" VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.15.0" REQUIRES = [ - "msrest==0.6.*", + "msrest==0.7.*", # "requests>=2.23.0,<2.26", "PyJWT>=2.4.0", "botbuilder-schema==4.15.0",
Advancing msrest version dependency ### Use this [query](https://github.com/Microsoft/botbuilder-python/issues?q=is%3Aissue+is%3Aopen++label%3Afeature-request+) to search for the most popular feature requests. _No open issues are reported for msrest._ **Is your feature request related to a problem? Please describe.** When installing a solution accelerator for OpenAI, the solution requires the bot framework. However, aligning the requirements requires downgrading the msrest package. botbuilder-schema 4.14.4 requires msrest==0.6.*, but you have msrest 0.7.1 which is incompatible. botframework-connector 4.14.4 requires msrest==0.6.*, but you have msrest 0.7.1 which is incompatible. Several key azure packages depend on msrest: azure-mgmt-authorization 3.0.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible. azure-mgmt-containerregistry 10.1.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible. azure-mgmt-resource 22.0.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible. azure-mgmt-search 9.0.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible. azure-mgmt-storage 21.0.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible. **Describe the solution you'd like** Align the entire botbuilder-python with msrest >= 0.7.1. **Describe alternatives you've considered** Since my part of the group collaboration does not require development on botbuilder-python, my workaround is installing msrest back to 0.7.1. Still, it would be good for botbuilder-python to be aligned with the azure-mgmt packages. **Additional context** Our team is forking this solution accelerator for customers deploying a solution which includes Azure OpenAI, Azure Cognitive Search, and Azure CosmosDB (in addition to the bot), so the alignment is important because what functionality would be lost in azure-mgmt is unknown. The original source is at https://github.com/MSUSAzureAccelerators/Azure-Cognitive-Search-Azure-OpenAI-Accelerator
2023-12-05T01:31:51
microsoft/botbuilder-python
2,057
microsoft__botbuilder-python-2057
[ "2040" ]
e61d4b39ec5ddb59bcbe63588079e1cecc8e4766
diff --git a/libraries/botbuilder-core/botbuilder/core/cloud_adapter_base.py b/libraries/botbuilder-core/botbuilder/core/cloud_adapter_base.py --- a/libraries/botbuilder-core/botbuilder/core/cloud_adapter_base.py +++ b/libraries/botbuilder-core/botbuilder/core/cloud_adapter_base.py @@ -100,7 +100,7 @@ async def send_activities( ) ) - response = response or ResourceResponse(activity.id or "") + response = response or ResourceResponse(id=activity.id or "") responses.append(response)
diff --git a/libraries/botbuilder-core/tests/simple_adapter.py b/libraries/botbuilder-core/tests/simple_adapter.py --- a/libraries/botbuilder-core/tests/simple_adapter.py +++ b/libraries/botbuilder-core/tests/simple_adapter.py @@ -75,7 +75,7 @@ async def update_activity(self, context: TurnContext, activity: Activity): if self._call_on_update is not None: self._call_on_update(activity) - return ResourceResponse(activity.id) + return ResourceResponse(id=activity.id) async def process_request(self, activity, handler): context = TurnContext(self, activity) diff --git a/libraries/botbuilder-core/tests/teams/simple_adapter_with_create_conversation.py b/libraries/botbuilder-core/tests/teams/simple_adapter_with_create_conversation.py --- a/libraries/botbuilder-core/tests/teams/simple_adapter_with_create_conversation.py +++ b/libraries/botbuilder-core/tests/teams/simple_adapter_with_create_conversation.py @@ -76,7 +76,7 @@ async def update_activity(self, context: TurnContext, activity: Activity): if self._call_on_update is not None: self._call_on_update(activity) - return ResourceResponse(activity.id) + return ResourceResponse(id=activity.id) async def process_request(self, activity, handler): context = TurnContext(self, activity)
4.14.6 CloudAdapter fails to send Typing Activity in Teams ## Version botbuilder-core 4.14.6 botbuilder-integration-aiohttp 4.14.6 botbuilder-schema 4.14.6 ## Describe the bug I am unable to send typing indicators with the `ShowTypingMiddleware` middleware, `turn_context.send_activity`, and `turn_context.send_activities`. ## To Reproduce Create a bot ``` cfg = DefaultConfig() adapter = CloudAdapter(ConfigurationBotFrameworkAuthentication(cfg)) bot = Bot() ``` define on_message_activity [From documentation](https://learn.microsoft.com/en-us/azure/bot-service/bot-builder-howto-send-messages?view=azure-bot-service-4.0&tabs=python) ``` async def on_message_activity(self, turn_context: TurnContext): # pylint: disable=unused-argument if turn_context.activity.text == "wait": return await turn_context.send_activities([ Activity( type=ActivityTypes.typing ), Activity( type="delay", value=3000 ), Activity( type=ActivityTypes.message, text="Finished Typing" ) ]) else: return await turn_context.send_activity( f"You said {turn_context.activity.text}. Say 'wait' to watch me type." ) ``` Publish in azure, set up MS Teams channel. send 'wait' via Microsoft Teams stacktrace: ``` Traceback (most recent call last): File "path-to-virtual-env/lib/python3.10/site-packages/botbuilder/core/bot_adapter.py", line 174, in run_pipeline return await self._middleware.receive_activity_with_status( File "path-to-virtual-env/lib/python3.10/site-packages/botbuilder/core/middleware_set.py", line 69, in receive_activity_with_status return await self.receive_activity_internal(context, callback) File "path-to-virtual-env/lib/python3.10/site-packages/botbuilder/core/middleware_set.py", line 79, in receive_activity_internal return await callback(context) File "path-to-virtual-env/lib/python3.10/site-packages/botbuilder/core/activity_handler.py", line 70, in on_turn await self.on_message_activity(turn_context) File "/home/josh/ctrlstack/babelfish/askbot/microsoft-teams/src/bot.py", line 78, in on_message_activity return await turn_context.send_activities([ File "path-to-virtual-env/lib/python3.10/site-packages/botbuilder/core/turn_context.py", line 225, in send_activities return await self._emit(self._on_send_activities, output, logic()) File "path-to-virtual-env/lib/python3.10/site-packages/botbuilder/core/turn_context.py", line 303, in _emit return await logic File "path-to-virtual-env/lib/python3.10/site-packages/botbuilder/core/turn_context.py", line 220, in logic responses = await self.adapter.send_activities(self, output) File "path-to-virtual-env/lib/python3.10/site-packages/botbuilder/core/cloud_adapter_base.py", line 103, in send_activities response = response or ResourceResponse(activity.id or "") ``` ## Expected behavior the typing indicator for 3 seconds.
I encountered a similar bug; it gets triggered when the bot is deployed to Teams, while running in the Emulator is fine. The problem is the bare "*" in the `__init__` signature of `class ResourceResponse(Model)` in _models_py3.py, which makes `id` a keyword-only argument, but the caller at `cloud_adapter_base.py:103` doesn't pass the parameter by keyword: response = response or ResourceResponse(activity.id or "") ```python class ResourceResponse(Model): """A response containing a resource ID. :param id: Id of the resource :type id: str """ _attribute_map = {"id": {"key": "id", "type": "str"}} def __init__(self, *, id: str = None, **kwargs) -> None: super(ResourceResponse, self).__init__(**kwargs) self.id = id ``` cloud_adapter_base.py ```python response = response or ResourceResponse(activity.id or "") ``` I suggest updating all callers, including cloud_adapter_base.py, to pass the keyword "id=", e.g.: ```python response = response or ResourceResponse(id=activity.id or "") ``` Same issue encountered.
2023-12-14T17:15:04
microsoft/botbuilder-python
2,062
microsoft__botbuilder-python-2062
[ "2061" ]
7a03e6554735bd6ddaeca009720008b6e128796c
diff --git a/libraries/botbuilder-core/botbuilder/core/teams/teams_info.py b/libraries/botbuilder-core/botbuilder/core/teams/teams_info.py --- a/libraries/botbuilder-core/botbuilder/core/teams/teams_info.py +++ b/libraries/botbuilder-core/botbuilder/core/teams/teams_info.py @@ -10,7 +10,7 @@ teams_get_meeting_info, teams_get_channel_data, ) -from botbuilder.core import CloudAdapterBase, BotFrameworkAdapter, TurnContext +from botbuilder.core import CloudAdapterBase, BotFrameworkAdapter, TurnContext, BotAdapter from botbuilder.schema import Activity, ConversationParameters, ConversationReference from botbuilder.schema.teams import ( ChannelInfo, @@ -318,10 +318,15 @@ def get_team_id(turn_context: TurnContext): @staticmethod async def _get_connector_client(turn_context: TurnContext) -> ConnectorClient: - return await turn_context.adapter.create_connector_client( - turn_context.activity.service_url + connector_client = turn_context.turn_state.get( + BotAdapter.BOT_CONNECTOR_CLIENT_KEY ) + if connector_client is None: + raise ValueError('This method requires a connector client.') + + return connector_client + @staticmethod async def _get_members( connector_client: ConnectorClient, conversation_id: str
diff --git a/libraries/botbuilder-core/tests/teams/test_teams_activity_handler.py b/libraries/botbuilder-core/tests/teams/test_teams_activity_handler.py --- a/libraries/botbuilder-core/tests/teams/test_teams_activity_handler.py +++ b/libraries/botbuilder-core/tests/teams/test_teams_activity_handler.py @@ -592,6 +592,9 @@ async def test_on_teams_members_added_activity(self): turn_context = TurnContext(SimpleAdapter(), activity) + mock_connector_client = await SimpleAdapter.create_connector_client(self, turn_context.activity.service_url) + turn_context.turn_state[BotAdapter.BOT_CONNECTOR_CLIENT_KEY] = mock_connector_client + # Act bot = TestingTeamsActivityHandler() await bot.on_turn(turn_context)
CloudAdapter' object has no attribute 'create_connector_client' Version 4.14.7 In my MS Teams bot i want to loop over all members that are already in the Team when i add the bot. To do this i wrote ``` async def on_installation_update_add(self, turn_context: TurnContext): # Get the team details team_details = await TeamsInfo.get_team_details(turn_context) if team_details: team_id = team_details.id # Retrieve all members of the team team_members = await TeamsInfo.get_team_members(turn_context, team_id) for member in team_members: if member.id != turn_context.activity.recipient.id: # Avoid messaging the bot itself # Prepare the message ``` It seems that TeamsInfo.get_team_members(turn_context, team_id) is not compatible with CloudAdapter. settings is not a known attribute of class <class 'botbuilder.schema.teams._models_py3.TeamsChannelData'> and will be ignored source is not a known attribute of class <class 'botbuilder.schema.teams._models_py3.TeamsChannelData'> and will be ignored [on_turn_error] unhandled error: 'CloudAdapter' object has no attribute 'create_connector_client' Traceback (most recent call last): File "\env\lib\site-packages\botbuilder\core\bot_adapter.py", line 174, in run_pipeline return await self._middleware.receive_activity_with_status( File "\env\lib\site-packages\botbuilder\core\middleware_set.py", line 69, in receive_activity_with_status return await self.receive_activity_internal(context, callback) File "\env\lib\site-packages\botbuilder\core\middleware_set.py", line 79, in receive_activity_internal return await callback(context) File "/app\bots\main_bot.py", line 30, in on_turn await self.teams_bot.on_turn(turn_context) File "\env\lib\site-packages\botbuilder\core\activity_handler.py", line 92, in on_turn await self.on_installation_update(turn_context) File "\env\lib\site-packages\botbuilder\core\activity_handler.py", line 384, in on_installation_update return await self.on_installation_update_add(turn_context) File "/app\bots\teams_bot.py", line 42, in on_installation_update_add team_details = await TeamsInfo.get_team_details(turn_context) File "\env\lib\site-packages\botbuilder\core\teams\teams_info.py", line 120, in get_team_details teams_connector = await TeamsInfo.get_teams_connector_client(turn_context) File "\env\lib\site-packages\botbuilder\core\teams\teams_info.py", line 305, in get_teams_connector_client connector_client = await TeamsInfo._get_connector_client(turn_context) File "\env\lib\site-packages\botbuilder\core\teams\teams_info.py", line 321, in _get_connector_client return await turn_context.adapter.create_connector_client( AttributeError: 'CloudAdapter' object has no attribute 'create_connector_client' Thanks for having a look
Thanks @BasileBerckmoes, I can look into this. @rampaged thank you! I was exploring the code myself to see if i could contribute but i need to progress my understanding of the framework first Hi @BasileBerckmoes, I'm able to reproduce this issue in Teams. I used sample [58.teams-start-thread-in-channel](https://github.com/microsoft/BotBuilder-Samples/tree/main/archive/samples/python/58.teams-start-thread-in-channel) to test this issue. I added this piece of code to reproduce the issue in `on_message_activity` method: ```py team_details = await TeamsInfo.get_team_details(turn_context) if team_details: team_id = team_details.id # Retrieve all members of the team team_members = await TeamsInfo.get_team_members(turn_context, team_id) for member in team_members: if member.id != turn_context.activity.recipient.id: # Avoid messaging the bot itself # Prepare the message await turn_context.send_activity( "some message here" ) ``` Attached a bot that demonstrates the problem. Here are the steps I followed to verify it: [58.teams-thread.zip](https://github.com/microsoft/botbuilder-python/files/13996535/58.teams-thread.zip) 1. Unzip the attached bot and follow this [readme](https://github.com/microsoft/BotBuilder-Samples/tree/main/archive/samples/python/58.teams-start-thread-in-channel#readme) to set up and configure the bot locally 2. Sideload the bot in Teams and add it to a Teams team in a channel 3. start a new post and @ mention the bot (TeamsStartThreadInChannel) ![Screenshot from 2024-01-19 19-47-09](https://github.com/microsoft/botbuilder-python/assets/38049078/46c00745-5e46-4d9a-9509-2f01a902f40a) 4. Notice the error: ``` js [on_turn_error] unhandled error: 'CloudAdapter' object has no attribute 'create_connector_client' Traceback (most recent call last): File "/home/rampage/Documents/bots/BotBuilder-Samples/archive/samples/python/58.teams-start-thread-in-channel/venv/lib/python3.10/site-packages/botbuild er/core/bot_adapter.py", line 174, in run_pipeline return await self._middleware.receive_activity_with_status( File "/home/rampage/Documents/bots/BotBuilder-Samples/archive/samples/python/58.teams-start-thread-in-channel/venv/lib/python3.10/site-packages/botbuild er/core/middleware_set.py", line 69, in receive_activity_with_status return await self.receive_activity_internal(context, callback) File "/home/rampage/Documents/bots/BotBuilder-Samples/archive/samples/python/58.teams-start-thread-in-channel/venv/lib/python3.10/site-packages/botbuild er/core/middleware_set.py", line 79, in receive_activity_internal return await callback(context) File "/home/rampage/Documents/bots/BotBuilder-Samples/archive/samples/python/58.teams-start-thread-in-channel/venv/lib/python3.10/site-packages/botbuild er/core/activity_handler.py", line 70, in on_turn await self.on_message_activity(turn_context) File "/home/rampage/Documents/bots/BotBuilder-Samples/archive/samples/python/58.teams-start-thread-in-channel/bots/teams_start_thread_in_channel.py", li ne 17, in on_message_activity team_details = await TeamsInfo.get_team_details(turn_context) File "/home/rampage/Documents/bots/BotBuilder-Samples/archive/samples/python/58.teams-start-thread-in-channel/venv/lib/python3.10/site-packages/botbuild er/core/teams/teams_info.py", line 120, in get_team_details teams_connector = await TeamsInfo.get_teams_connector_client(turn_context) File "/home/rampage/Documents/bots/BotBuilder-Samples/archive/samples/python/58.teams-start-thread-in-channel/venv/lib/python3.10/site-packages/botbuild er/core/teams/teams_info.py", line 
305, in get_teams_connector_client connector_client = await TeamsInfo._get_connector_client(turn_context) File "/home/rampage/Documents/bots/BotBuilder-Samples/archive/samples/python/58.teams-start-thread-in-channel/venv/lib/python3.10/site-packages/botbuild er/core/teams/teams_info.py", line 321, in _get_connector_client return await turn_context.adapter.create_connector_client( AttributeError: 'CloudAdapter' object has no attribute 'create_connector_client' ``` Call stack diagram: ```md teams_start_thread_in_channel.py |__ on_message_activity (line 17) | |__ TeamsInfo.get_team_details | |__ TeamsInfo.get_teams_connector_client | |__ TeamsInfo._get_connector_client (line 320) | |__ turn_context.adapter.create_connector_client (line 321) <---- error occurs ``` CC @stevkan to handle this issue internally with Microsoft engineers. @BasileBerckmoes, I'm in the same boat, teaching myself the framework as well. I explored the python bot framework sdk source code and tweaked [cloud_adapter.py](https://github.com/microsoft/botbuilder-python/blob/e14f4f4e6a26b439872187afab6f322758730209/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/cloud_adapter.py) and [teams_info.py](https://github.com/microsoft/botbuilder-python/blob/e14f4f4e6a26b439872187afab6f322758730209/libraries/botbuilder-core/botbuilder/core/teams/teams_info.py), which I think resolved the issue. However, I'm still uncertain if this is the right approach since I'm learning the framework and need feedback from the engineers. # ### Observation: In the javascript sdk side, the implementation detail for [getConnectorClient](https://github.com/microsoft/botbuilder-js/blob/6b2656b7dd149c5f9869ddcb6dc53d1f7e022c37/libraries/botbuilder/src/teamsInfo.ts#L627C20-L637) in [teamsInfo.ts](https://github.com/microsoft/botbuilder-js/blob/6b2656b7dd149c5f9869ddcb6dc53d1f7e022c37/libraries/botbuilder/src/teamsInfo.ts) is as follows: ``` js private static getConnectorClient(context: TurnContext): ConnectorClient { const client = context.adapter && 'createConnectorClient' in context.adapter ? (context.adapter as BotFrameworkAdapter).createConnectorClient(context.activity.serviceUrl) : context.turnState?.get<ConnectorClient>(context.adapter.ConnectorClientKey); if (!client) { throw new Error('This method requires a connector client.'); } return client; } ``` as opposed to [_get_connector_client ](https://github.com/microsoft/botbuilder-python/blob/e14f4f4e6a26b439872187afab6f322758730209/libraries/botbuilder-core/botbuilder/core/teams/teams_info.py#L320-L323) method in [teams_info.py](https://github.com/microsoft/botbuilder-python/blob/e14f4f4e6a26b439872187afab6f322758730209/libraries/botbuilder-core/botbuilder/core/teams/teams_info.py) on the python sdk: ```py @staticmethod async def _get_connector_client(turn_context: TurnContext) -> ConnectorClient: return await turn_context.adapter.create_connector_client( turn_context.activity.service_url ) ``` In JavaScript, `getConnectorClient` checks if the "createConnectorClient" method exists in "context.adapter" and calls it, otherwise it retrieves a "ConnectorClient" from "context.turnState" using a predefined key. So my idea was to implement the same logic as `getConnectorClient` method but in python sdk. 
# ### Changes: updated [_get_connector_client ](https://github.com/microsoft/botbuilder-python/blob/e14f4f4e6a26b439872187afab6f322758730209/libraries/botbuilder-core/botbuilder/core/teams/teams_info.py#L320-L323)method in [teams_info.py](https://github.com/microsoft/botbuilder-python/blob/e14f4f4e6a26b439872187afab6f322758730209/libraries/botbuilder-core/botbuilder/core/teams/teams_info.py): ``` diff + from botbuilder.integration.aiohttp import CloudAdapter async def _get_connector_client(turn_context: TurnContext) -> ConnectorClient: + if isinstance(turn_context.adapter, CloudAdapter): + return await turn_context.adapter.create_connector_client(turn_context) + else: return await turn_context.adapter.create_connector_client( turn_context.activity.service_url ) ``` and in [CloudAdapter](https://github.com/microsoft/botbuilder-python/blob/e14f4f4e6a26b439872187afab6f322758730209/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/cloud_adapter.py#L44) class, I added this method: ```py async def create_connector_client(self, turn_context: TurnContext): connector_client: ConnectorClient = turn_context.turn_state.get( self.BOT_CONNECTOR_CLIENT_KEY ) return connector_client ``` finally, in [58.teams-start-thread-in-channel](https://github.com/microsoft/BotBuilder-Samples/tree/main/archive/samples/python/58.teams-start-thread-in-channel) bot sample(not sdk), in file [teams_start_thread_in_channel.py](https://github.com/microsoft/BotBuilder-Samples/blob/main/archive/samples/python/58.teams-start-thread-in-channel/bots/teams_start_thread_in_channel.py), i changed: ```py connector_client = await turn_context.adapter.create_connector_client(turn_context.activity.service_url) ``` to: ```py connector_client = await turn_context.adapter.create_connector_client(turn_context) ``` # ### Result: ```py # Get the team details team_details = await TeamsInfo.get_team_details(turn_context) if team_details: team_id = team_details.id # Retrieve all members of the team team_members = await TeamsInfo.get_team_members(turn_context, team_id) for member in team_members: if member.id != turn_context.activity.recipient.id: # Avoid messaging the bot itself # Prepare the message await turn_context.send_activity( f"name {member.name}" ) ``` ![Screenshot from 2024-01-20 02-03-52](https://github.com/microsoft/botbuilder-python/assets/38049078/2100068a-cd13-4e88-8c12-4b436f11bcd8)
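For the record, the merged fix (the teams_info.py patch above) takes a slightly different route from the sketch in the previous comment: rather than adding a `create_connector_client` method to `CloudAdapter`, `TeamsInfo` reads the connector client that the cloud adapter already places in `turn_state` for the current turn, mirroring the JavaScript fallback path quoted above. Condensed from the diff (in the SDK this is a `@staticmethod` on `TeamsInfo`):

```python
from botbuilder.core import BotAdapter, TurnContext
from botframework.connector import ConnectorClient


async def get_connector_client(turn_context: TurnContext) -> ConnectorClient:
    # CloudAdapter stores the connector client it built for this turn in
    # turn_state, so TeamsInfo can read it back instead of asking the adapter
    # to create one (which only BotFrameworkAdapter knows how to do).
    connector_client = turn_context.turn_state.get(BotAdapter.BOT_CONNECTOR_CLIENT_KEY)
    if connector_client is None:
        raise ValueError("This method requires a connector client.")
    return connector_client
```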
2024-01-21T21:39:51
microsoft/botbuilder-python
2,069
microsoft__botbuilder-python-2069
[ "2059" ]
e98d4069fa516a816c3b8074d81a4f6c82411318
diff --git a/libraries/botbuilder-ai/setup.py b/libraries/botbuilder-ai/setup.py --- a/libraries/botbuilder-ai/setup.py +++ b/libraries/botbuilder-ai/setup.py @@ -8,7 +8,7 @@ "azure-cognitiveservices-language-luis==0.2.0", "botbuilder-schema==4.15.0", "botbuilder-core==4.15.0", - "aiohttp==3.8.5", + "aiohttp==3.9.3", ] TESTS_REQUIRES = ["aiounittest>=1.1.0"] diff --git a/libraries/botbuilder-integration-aiohttp/setup.py b/libraries/botbuilder-integration-aiohttp/setup.py --- a/libraries/botbuilder-integration-aiohttp/setup.py +++ b/libraries/botbuilder-integration-aiohttp/setup.py @@ -10,7 +10,7 @@ "botframework-connector==4.15.0", "botbuilder-core==4.15.0", "yarl>=1.8.1", - "aiohttp==3.8.5", + "aiohttp==3.9.3", ] root = os.path.abspath(os.path.dirname(__file__)) diff --git a/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py b/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py --- a/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py +++ b/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py @@ -6,7 +6,7 @@ REQUIRES = [ "applicationinsights>=0.11.9", - "aiohttp==3.8.5", + "aiohttp==3.9.3", "botbuilder-schema==4.15.0", "botframework-connector==4.15.0", "botbuilder-core==4.15.0",
diff --git a/libraries/botframework-connector/tests/requirements.txt b/libraries/botframework-connector/tests/requirements.txt --- a/libraries/botframework-connector/tests/requirements.txt +++ b/libraries/botframework-connector/tests/requirements.txt @@ -1,5 +1,5 @@ pytest-cov>=2.6.0 -pytest~=6.2.3 +pytest~=7.3.1 pyyaml==6.0 pytest-asyncio==0.15.1 ddt==1.2.1 \ No newline at end of file
Recommended change to 3.8.6 or above https://github.com/microsoft/botbuilder-python/blob/7b064bb9f916afc10e931f3713183f57e1d7ca47/libraries/botbuilder-integration-aiohttp/setup.py#L13 I have a conflict when introducing llamaindex, which requires version 3.8.6 or higher!
It also gets us flagged for 5 aiohttp vulnerabilities (1 high severity, 4 medium)
2024-02-08T17:43:25
microsoft/botbuilder-python
2,083
microsoft__botbuilder-python-2083
[ "2084" ]
4acc0ea4157d95c018925db72ed3ce8832298641
diff --git a/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py b/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py --- a/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py +++ b/libraries/botbuilder-schema/botbuilder/schema/teams/_models_py3.py @@ -1903,7 +1903,7 @@ class TeamsChannelAccount(ChannelAccount): "surname": {"key": "surname", "type": "str"}, "email": {"key": "email", "type": "str"}, "user_principal_name": {"key": "userPrincipalName", "type": "str"}, - "aad_object_id": {"key": "objectId", "type": "str"}, + "aad_object_id": {"key": "aadObjectId", "type": "str"}, "tenant_id": {"key": "tenantId", "type": "str"}, "user_role": {"key": "userRole", "type": "str"}, }
TeamsChannelAccount model missing aadObjectId Fixes the TeamsChannelAccount.aad_object_id property. The current definition returns an additional property and a null aad_object_id: {'additional_properties': {'aadObjectId': <UUID>}, 'aad_object_id': None} instead of: {'additional_properties': {}, 'aad_object_id': <UUID>} See: https://learn.microsoft.com/en-us/dotnet/api/microsoft.bot.schema.teams.teamschannelaccount?view=botbuilder-dotnet-stable
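The behavior is easiest to see with a small standalone sketch of how an attribute map drives deserialization; this is a simplified illustration of the idea, not the actual msrest implementation, and the `apply_attribute_map` helper plus the sample UUID are made up for the example.

```py
# Simplified, hypothetical sketch: an attribute map that points at the wrong
# wire key ("objectId") lets the incoming "aadObjectId" value fall through to
# additional_properties instead of populating aad_object_id.

def apply_attribute_map(payload: dict, attribute_map: dict) -> dict:
    """Map wire keys to model attributes; unmapped keys land in additional_properties."""
    wire_to_attr = {wire_key: attr for attr, wire_key in attribute_map.items()}
    result = {attr: None for attr in attribute_map}
    result["additional_properties"] = {}
    for key, value in payload.items():
        if key in wire_to_attr:
            result[wire_to_attr[key]] = value
        else:
            result["additional_properties"][key] = value
    return result

payload = {"aadObjectId": "11111111-2222-3333-4444-555555555555"}  # made-up UUID

# Old mapping ("objectId"): value is misplaced and aad_object_id stays None.
print(apply_attribute_map(payload, {"aad_object_id": "objectId"}))

# Fixed mapping ("aadObjectId"): value is deserialized into aad_object_id.
print(apply_attribute_map(payload, {"aad_object_id": "aadObjectId"}))
```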
2024-03-06T02:24:42
microsoft/botbuilder-python
2,117
microsoft__botbuilder-python-2117
[ "2116" ]
bc3b6f4125e6f4ca5e6cae2dd25c62233e8ef508
diff --git a/libraries/botframework-connector/botframework/connector/auth/skill_validation.py b/libraries/botframework-connector/botframework/connector/auth/skill_validation.py --- a/libraries/botframework-connector/botframework/connector/auth/skill_validation.py +++ b/libraries/botframework-connector/botframework/connector/auth/skill_validation.py @@ -120,7 +120,7 @@ async def authenticate_channel_token( ) if auth_configuration.valid_token_issuers: - token_validation_parameters.issuer.append( + token_validation_parameters.issuer.extend( auth_configuration.valid_token_issuers )
valid_token_issuers incorrectly populated during SingleTenant validation ## Version 4.15.0 ## Describe the bug Tenant-specific token issuers (valid_token_issuers) are added as a single nested element instead of as individual issuers. ## To Reproduce Steps to reproduce the behavior: Authenticating with app_type 'SingleTenant' will result in unauthorized requests. ## Expected behavior Tenant-specific token issuers are treated as valid.
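The symptom comes down to `list.append` versus `list.extend`; a minimal reproduction of the difference follows (the issuer URLs below are illustrative placeholders, not taken from the SDK):

```py
# append() adds the tenant issuer list as one nested element;
# extend() adds each issuer individually, which is what validation needs.
default_issuers = ["https://api.botframework.com"]  # placeholder issuer
tenant_issuers = [
    "https://login.microsoftonline.com/<tenant-id>/",      # placeholder
    "https://login.microsoftonline.com/<tenant-id>/v2.0",  # placeholder
]

buggy = list(default_issuers)
buggy.append(tenant_issuers)   # [..., [issuer1, issuer2]] -> nested list

fixed = list(default_issuers)
fixed.extend(tenant_issuers)   # [..., issuer1, issuer2] -> flat list

# Membership checks against the nested version fail, so a SingleTenant
# token's issuer is never recognized as valid.
print(tenant_issuers[0] in buggy)  # False
print(tenant_issuers[0] in fixed)  # True
```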
2024-05-29T14:08:19
microsoft/botbuilder-python
2,120
microsoft__botbuilder-python-2120
[ "2119" ]
b33dc49e1cc51c893b1585974bafddaab68ebbe4
diff --git a/libraries/botbuilder-core/botbuilder/core/cloud_adapter_base.py b/libraries/botbuilder-core/botbuilder/core/cloud_adapter_base.py --- a/libraries/botbuilder-core/botbuilder/core/cloud_adapter_base.py +++ b/libraries/botbuilder-core/botbuilder/core/cloud_adapter_base.py @@ -120,7 +120,7 @@ async def update_activity(self, context: TurnContext, activity: Activity): raise Error("Unable to extract ConnectorClient from turn context.") response = await connector_client.conversations.update_activity( - activity.conversation.id, activity.reply_to_id, activity + activity.conversation.id, activity.id, activity ) response_id = response.id if response and response.id else None
CloudAdapterBase.update_activity using wrong activity id argument ``` response = await connector_client.conversations.update_activity( activity.conversation.id, activity.reply_to_id, activity ) ``` should be ``` response = await connector_client.conversations.update_activity( activity.conversation.id, activity.id, activity ) ```
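For context, `reply_to_id` identifies the activity this one is replying to, while `id` identifies the activity itself, which is what the update call needs; a small illustration using the schema types (the identifier values are made up):

```py
# Illustration of which identifier update_activity should be keyed on.
from botbuilder.schema import Activity, ConversationAccount

updated = Activity(
    type="message",
    id="activity-123",            # the activity being updated (made-up id)
    reply_to_id="activity-042",   # the activity this one replied to (made-up id)
    conversation=ConversationAccount(id="conversation-789"),  # made-up id
    text="edited text",
)

# The connector call needs the conversation id plus the activity's own id.
print(updated.conversation.id, updated.id)  # conversation-789 activity-123
```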
2024-05-30T14:33:58
opentensor/bittensor
32
opentensor__bittensor-32
[ "21" ]
b3e6f460b3cf49679f78e30b838f2e0157925ac0
diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,50 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + + +# -- Project information ----------------------------------------------------- + +project = 'BitTensor' +copyright = '2020, Jacob R. Steeves, Ala Shaabana' +author = 'Jacob R. Steeves, Ala Shaabana' + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = ['recommonmark'] +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'alabaster' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static']
Add readthedocs documentation The repository is integrated with readthedocs, but actual documentation still needs to be added before anything can be generated there. The first thing we should start with is the NashTensor description, then a GRPC description; the full SDK documentation can follow in another ticket.
2020-10-01T17:59:20
opentensor/bittensor
36
opentensor__bittensor-36
[ "33" ]
fb16ca3337a15d46ab8f690d9c00ebe2c64b24aa
diff --git a/examples/mnist/main.py b/examples/mnist/main.py --- a/examples/mnist/main.py +++ b/examples/mnist/main.py @@ -16,6 +16,10 @@ import math import time import torch +import torchvision +from typing import List, Tuple, Dict, Optional +import traceback + import torch.nn as nn import torch.nn.functional as F import torch.optim as optim @@ -338,6 +342,7 @@ def test( model: bittensor.Synapse ): epoch += 1 except Exception as e: + traceback.print_exc() logger.error(e) metagraph.stop() axon.stop()
Docker and python files take different cmd line arguments Presently, running Bittensor via docker run takes different command-line flags than the typical native Python application. This is because the dockerized version "assumes" all the flags needed to run it in Python, when in reality it should accept the same flags as the Python command line and pass them along until it runs the model in Python.
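One way to line the two up, sketched under the assumption of a thin entrypoint that forwards whatever flags it receives to the underlying Python program (the target script and the example flag are placeholders, not the project's actual entrypoint):

```py
# Hypothetical pass-through entrypoint: forward all command-line flags
# unchanged to the Python application instead of hard-coding them.
import subprocess
import sys

def main() -> None:
    forwarded_args = sys.argv[1:]  # everything after the entrypoint name
    # e.g. `docker run image --axon_port 8091` would then behave like
    # `python examples/mnist/main.py --axon_port 8091`
    subprocess.run(
        [sys.executable, "examples/mnist/main.py", *forwarded_args],
        check=True,
    )

if __name__ == "__main__":
    main()
```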
2020-10-05T18:57:28
opentensor/bittensor
39
opentensor__bittensor-39
[ "25" ]
595543f8f0a0c68c5d1caaa7ef3e13efedc96a71
diff --git a/bittensor/dendrite.py b/bittensor/dendrite.py --- a/bittensor/dendrite.py +++ b/bittensor/dendrite.py @@ -56,6 +56,7 @@ def forward(self, synapses: List[bittensor_pb2.Synapse], x: List[ object ], mode # Call remote synapse. results.append(remote_synapse(forward_inputs, mode)) + return results # NOTE: (const) This code has been ported from hivemind thanks to Yozh and Max. @@ -130,10 +131,14 @@ def forward(ctx, caller: RemoteSynapse, dummy: torch.Tensor, inputs: object, mod ) # Make rpc call. - response = ctx.caller.stub.Forward(request) - - # Deserialize outputs and return. - outputs = PyTorchSerializer.deserialize_tensor(response.tensors[0]) + try: + response = ctx.caller.stub.Forward(request) + # Deserialize outputs and return. + outputs = PyTorchSerializer.deserialize_tensor(response.tensors[0]) + except grpc._channel._InactiveRpcError as ire: + #logger.error("Could not forward() to peer: {}".format(ire)) + outputs = torch.zeros((inputs.size(0), bittensor.__network_dim__)) + return outputs @staticmethod @@ -154,14 +159,19 @@ def backward(ctx, grads: torch.Tensor) -> Optional[torch.Tensor]: tensors = [serialized_inputs, serialized_grads] ) - # Attain backward response - response = ctx.caller.stub.Backward(request) + deserialized_grad_inputs = torch.zeros(1,1) + + try: + # Attain backward response + response = ctx.caller.stub.Backward(request) - # Deserialize grad responses. - # TODO (const) maybe remove this? - if ctx.mode == bittensor_pb2.Modality.TEXT: - return (None, None, None, None) - else: - deserialized_grad_inputs = PyTorchSerializer.deserialize (response.tensors[0]) - return (None, None, deserialized_grad_inputs, None) - \ No newline at end of file + # Deserialize grad responses. + # TODO (const) maybe remove this? + if ctx.mode == bittensor_pb2.Modality.TEXT: + return (None, None, None, None) + else: + deserialized_grad_inputs = PyTorchSerializer.deserialize (response.tensors[0]) + return (None, None, deserialized_grad_inputs, None) + except grpc._channel._InactiveRpcError as ire: + #logger.error("Could not backward() to peer: {}".format(ire)) + return (None, None, deserialized_grad_inputs, None) \ No newline at end of file diff --git a/bittensor/metagraph.py b/bittensor/metagraph.py --- a/bittensor/metagraph.py +++ b/bittensor/metagraph.py @@ -82,8 +82,8 @@ def _sink(self, request: bittensor_pb2.GossipBatch): self._heartbeat[synapse.synapse_key] = time.time() def Gossip(self, request: bittensor_pb2.GossipBatch, context): - synapses = self.get_synapses(1000) - peers = self.get_peers(10) + synapses = self.synapses(1000) + peers = self.peers(10) self._sink(request) response = bittensor_pb2.GossipBatch(peers=peers, synapses=synapses) return response @@ -93,15 +93,15 @@ def do_gossip(self): if len(self._peers) == 0: return - synapses = self.get_synapses(1000) - peers = self.get_peers(10) + synapses = self.synapses(1000) + peers = self.peers(10) metagraph_address = random.choice(list(self._peers)) realized_address = metagraph_address if metagraph_address.split(':')[0] == self._config.remote_ip: realized_address = 'localhost:' + str(metagraph_address.split(":")[1]) try: - version = bittensor.__version__ + channel = grpc.insecure_channel(realized_address) stub = bittensor_grpc.MetagraphStub(channel) request = bittensor_pb2.GossipBatch(peers=peers, synapses=synapses) @@ -131,18 +131,20 @@ def _update(self): while self._running: self.do_gossip() if len(self._peers) > 0: - self.do_clean(60*60) + self.do_clean(15) time.sleep(10) - except (KeyboardInterrupt, SystemExit): + 
except (KeyboardInterrupt, SystemExit) as e: logger.info('stop metagraph') self._running = False self.stop() + raise e def _serve(self): try: self._server.start() - except (KeyboardInterrupt, SystemExit): + except (KeyboardInterrupt, SystemExit) as ex: self.stop() + raise ex except Exception as e: logger.error(e) diff --git a/examples/mnist/main.py b/examples/mnist/main.py --- a/examples/mnist/main.py +++ b/examples/mnist/main.py @@ -96,7 +96,7 @@ def train(model, epoch, global_step): # Logs: if batch_idx % log_interval == 0: - n_peers = len(bittensor.metagraph.peers) + n_peers = len(bittensor.metagraph.peers()) n_synapses = len(bittensor.metagraph.synapses()) writer.add_scalar('n_peers', n_peers, global_step) writer.add_scalar('n_synapses', n_synapses, global_step) @@ -104,7 +104,7 @@ def train(model, epoch, global_step): n = len(train_data) logger.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLocal Loss: {:.6f}\nNetwork Loss: {:.6f}\tDistillation Loss: {:.6f}\tnP|nS: {}|{}'.format( - epoch, (batch_idx * batch_size_train), n, (100. * batch_idx * batch_size_train)/n, output['local_target_loss'].item(), output['network_target_loss'].item(), output['distillation_loss'].item(), len(bittensor.metagraph.peers), + epoch, (batch_idx * batch_size_train), n, (100. * batch_idx * batch_size_train)/n, output['local_target_loss'].item(), output['network_target_loss'].item(), output['distillation_loss'].item(), len(bittensor.metagraph.peers()), len(bittensor.metagraph.synapses()))) # Test loop.
Bittensor throws exception if one peer dies or stops contributing We need to surround the remote peer calls with try/except blocks so that Bittensor continues without interruption if a specific peer dies or stops contributing.
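The shape of the fix is to wrap each remote peer call in try/except and substitute a zero tensor when the peer is unreachable; a stripped-down sketch (the `call_remote_peer` stub and the 512-wide network dimension are stand-ins, not the real dendrite code):

```py
# Guarding a remote peer call so one dead peer cannot crash the training loop.
import grpc
import torch

NETWORK_DIM = 512  # stand-in for bittensor.__network_dim__

def call_remote_peer(inputs: torch.Tensor) -> torch.Tensor:
    """Stand-in for the forward RPC to a peer; here it always fails."""
    raise grpc.RpcError("peer unreachable")

def safe_forward(inputs: torch.Tensor) -> torch.Tensor:
    try:
        return call_remote_peer(inputs)
    except grpc.RpcError:
        # Peer died or stopped responding: fall back to zeros so the
        # local model keeps training instead of raising.
        return torch.zeros((inputs.size(0), NETWORK_DIM))

outputs = safe_forward(torch.randn(4, 32))
print(outputs.shape)  # torch.Size([4, 512])
```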
2020-10-09T20:26:31
opentensor/bittensor
48
opentensor__bittensor-48
[ "48" ]
9d8b52534553fb1aa7969fa2a4876526722a0db7
diff --git a/bittensor/dendrite.py b/bittensor/dendrite.py --- a/bittensor/dendrite.py +++ b/bittensor/dendrite.py @@ -1,11 +1,13 @@ from bittensor import bittensor_pb2_grpc as bittensor_grpc from bittensor import bittensor_pb2 from bittensor.serializer import PyTorchSerializer -import bittensor from loguru import logger from typing import List, Tuple, Dict, Optional +from bittensor.exceptions.ResponseExceptions import EmptyTensorException + +import bittensor import os import grpc import PIL @@ -86,7 +88,10 @@ def __init__(self, synapse: bittensor_pb2.Synapse, config: bittensor.Config): self.local_neuron_key = config.neuron_key # Loop back if the synapse is local. if synapse.address == config.remote_ip: - self.endpoint = 'localhost:' + synapse.port + ip = "localhost:" + if config.remote_ip == "host.docker.internal": + ip = "host.docker.internal:" + self.endpoint = ip + synapse.port else: self.endpoint = synapse.address + ':' + synapse.port # TODO(const): should accept defaults. config = bittensor.config_or_defaults(config) @@ -148,10 +153,16 @@ def forward(ctx, caller: RemoteSynapse, dummy: torch.Tensor, inputs: torch.Tenso try: response = ctx.caller.stub.Forward(request) # Deserialize outputs and return. - outputs = PyTorchSerializer.deserialize_tensor(response.tensors[0]) + if len(response.tensors) > 0: + outputs = PyTorchSerializer.deserialize_tensor(response.tensors[0]) + else: + raise EmptyTensorException + except grpc._channel._InactiveRpcError as ire: #logger.error("Could not forward() to peer: {}".format(ire)) outputs = torch.zeros((inputs.size(0), bittensor.__network_dim__)) + except EmptyTensorException as ete: + outputs = torch.zeros((inputs.size(0), bittensor.__network_dim__)) return outputs diff --git a/bittensor/exceptions/ResponseExceptions.py b/bittensor/exceptions/ResponseExceptions.py new file mode 100644 --- /dev/null +++ b/bittensor/exceptions/ResponseExceptions.py @@ -0,0 +1,3 @@ +class EmptyTensorException (Exception): + """ Raised when tensor included in the response is unexpectedly empty """ + pass \ No newline at end of file
Deploy BT on digitalocean This PR enables us to deploy BT nodes on digitalocean for larger-scale experimenting. Following the container logs is now optional, controlled by the `logging` command-line argument. A node can be started with: `./start_bittensor.sh -m <metagraph_port> -t <digital_ocean_token> --logging true` Subsequent nodes can be started with `./start_bittensor.sh --axon_port <bootstrap peer's axon port> --bootstrap <bootstrap peer's IP:port address> --token <digital_ocean_token>` closes #48
2020-10-12T16:09:58
opentensor/bittensor
969
opentensor__bittensor-969
[ "957", "955" ]
1e460bbb9d4c9ed3105f2a51c7dfd1db5cd129c0
diff --git a/bittensor/__init__.py b/bittensor/__init__.py --- a/bittensor/__init__.py +++ b/bittensor/__init__.py @@ -23,7 +23,7 @@ nest_asyncio.apply() # Bittensor code and protocol version. -__version__ = '3.4.1' +__version__ = '3.4.2' version_split = __version__.split(".") __version_as_int__ = (100 * int(version_split[0])) + (10 * int(version_split[1])) + (1 * int(version_split[2]))
Bit 590 backward fix - Keep track of the remote loss on the server, saving the model based on the average of the 20 most recent losses. - When doing tokenization remap, make sure the input size and output size are the same. - Ensure encode_forward_causallmnext is deterministic by setting the seed. - When both local_train and remote_train are on: do local_train only when the server is free. - Validator defaults to doing backward training Minor fixes 1) Removes the parser generation on the config which messes with --help when using a parser 2) Turns off console-rich local logging (which sucks)
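The first bullet, saving the model on the best average of the 20 most recent remote losses, can be sketched roughly as below; this is a simplified illustration, not the server code, and `RollingLossCheckpointer` is a made-up name:

```py
# Rough sketch: checkpoint whenever the 20-sample rolling average loss improves.
from collections import deque
from statistics import mean

class RollingLossCheckpointer:
    def __init__(self, window: int = 20):
        self.recent_losses = deque(maxlen=window)
        self.best_average = float("inf")

    def update(self, remote_loss: float) -> bool:
        """Record a remote loss; return True when the model should be saved."""
        self.recent_losses.append(remote_loss)
        if len(self.recent_losses) < self.recent_losses.maxlen:
            return False  # not enough samples for a stable average yet
        average = mean(self.recent_losses)
        if average < self.best_average:
            self.best_average = average
            return True
        return False

checkpointer = RollingLossCheckpointer()
for step, loss in enumerate([3.1, 2.9, 2.8] * 10):
    if checkpointer.update(loss):
        print(f"step {step}: new best rolling average, save the model here")
```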
2022-10-31T21:29:31
opentensor/bittensor
1,231
opentensor__bittensor-1231
[ "1228" ]
5039d8833d95304a2e9f37d7c9ff04b15ebff4fe
diff --git a/bittensor/_cli/__init__.py b/bittensor/_cli/__init__.py --- a/bittensor/_cli/__init__.py +++ b/bittensor/_cli/__init__.py @@ -59,11 +59,10 @@ def __new__( return naka_CLI(config=config) else: return cli_impl.CLI( config = config) - - @staticmethod - def config(args: List[str]) -> 'bittensor.config': - """ From the argument parser, add config to bittensor.executor and local config - Return: bittensor.config object + + @staticmethod + def __create_parser__() -> 'argparse.ArgumentParser': + """ Creates the argument parser for the bittensor cli. """ parser = argparse.ArgumentParser( description=f"bittensor cli v{bittensor.__version__}", @@ -88,7 +87,6 @@ def config(args: List[str]) -> 'bittensor.config': MetagraphCommand.add_args( cmd_parsers ) SetWeightsCommand.add_args( cmd_parsers ) NewColdkeyCommand.add_args( cmd_parsers ) - NewHotkeyCommand.add_args( cmd_parsers ) MyDelegatesCommand.add_args( cmd_parsers ) ListSubnetsCommand.add_args( cmd_parsers ) RegenHotkeyCommand.add_args( cmd_parsers ) @@ -99,6 +97,15 @@ def config(args: List[str]) -> 'bittensor.config': RegenColdkeypubCommand.add_args( cmd_parsers ) RecycleRegisterCommand.add_args( cmd_parsers ) + return parser + + @staticmethod + def config(args: List[str]) -> 'bittensor.config': + """ From the argument parser, add config to bittensor.executor and local config + Return: bittensor.config object + """ + parser = cli.__create_parser__() + # If no arguments are passed, print help text. if len(args) == 0: parser.print_help()
diff --git a/tests/integration_tests/test_cli_no_network.py b/tests/integration_tests/test_cli_no_network.py --- a/tests/integration_tests/test_cli_no_network.py +++ b/tests/integration_tests/test_cli_no_network.py @@ -19,8 +19,10 @@ import unittest from unittest.mock import MagicMock, patch +from typing import Any import pytest from copy import deepcopy +import re import bittensor @@ -30,9 +32,24 @@ class TestCLINoNetwork(unittest.TestCase): @classmethod def setUpClass(cls) -> None: + mock_delegate_info = { + "hotkey_ss58": "", + "total_stake": bittensor.Balance.from_rao(0), + "nominators": [], + "owner_ss58": "", + "take": 0.18, + "validator_permits": [], + "registrations": [], + "return_per_1000": bittensor.Balance.from_rao(0), + "total_daily_return": bittensor.Balance.from_rao(0) + } cls._patched_subtensor = patch('bittensor._subtensor.subtensor_mock.mock_subtensor.mock', new=MagicMock( return_value=MagicMock( get_subnets=MagicMock(return_value=[1]), # Mock subnet 1 ONLY. + block=10_000, + get_delegates=MagicMock(return_value=[ + bittensor.DelegateInfo( **mock_delegate_info ) + ]), ) )) cls._patched_subtensor.start() @@ -64,10 +81,6 @@ def construct_config(): return defaults def test_check_configs(self): - commands = ["run", "transfer", "register", "unstake", - "stake", "overview", "new_coldkey", "new_hotkey", - "regen_coldkey", "regen_hotkey", "metagraph", "weights", - "set_weights", "inspect"] config = self.config config.no_prompt = True config.model = "core_server" @@ -78,12 +91,30 @@ def test_check_configs(self): config.uids = [1,2,3] config.weights = [0.25, 0.25, 0.25, 0.25] config.no_version_checking = True + config.ss58_address = bittensor.Keypair.create_from_seed( b'0' * 32 ).ss58_address + config.public_key_hex = None cli = bittensor.cli + + # Get argparser + parser = cli.__create_parser__() + # Get all commands from argparser + commands = [ + command for command in parser._actions[1].choices + ] + + def ask_response(prompt: str) -> Any: + if "delegate index" in prompt: + return 0 + elif "wallet name" in prompt: + return "mock" + elif "hotkey" in prompt: + return "mock" - for cmd in commands: - config.command = cmd - cli.check_config(config) + with patch('rich.prompt.Prompt.ask', ask_response): + for cmd in commands: + config.command = cmd + cli.check_config(config) def test_new_coldkey( self ): config = self.config @@ -256,8 +287,23 @@ def test_btcli_help(self): # Expected help output if all commands are listed assert 'positional arguments' in help_out # Verify that cli is printing the help message for - assert 'overview' in help_out - assert 'run' in help_out + # Get argparser + parser = bittensor.cli.__create_parser__() + # Get all commands from argparser + commands = [ + command for command in parser._actions[1].choices + ] + # Verify that all commands are listed in the help message + for command in commands: + assert command in help_out + + # Verify there are no duplicate commands + # Listed twice. Once in the positional arguments and once in the optional arguments + for command in commands: + pat = re.compile(rf'\n\s+({command})\s+\w') + matches = pat.findall(help_out) + + self.assertEqual( len(matches), 1, f"Duplicate command {command} in help output") def test_register_cuda_use_cuda_flag(self): class ExitEarlyException(Exception):
new_hotkey is listed twice under 'btcli --help' menu
2023-03-24T15:02:36
opentensor/bittensor
1,293
opentensor__bittensor-1293
[ "1292" ]
7fe1a17c5ec51718bcbfd7ae65ac52979c9d8b97
diff --git a/bittensor/_synapse/text_prompting/synapse.py b/bittensor/_synapse/text_prompting/synapse.py --- a/bittensor/_synapse/text_prompting/synapse.py +++ b/bittensor/_synapse/text_prompting/synapse.py @@ -21,6 +21,7 @@ from typing import List, Dict, Union, Callable from abc import ABC, abstractmethod +import json class SynapseForward( bittensor.SynapseCall ): name: str = "text_prompting_forward" @@ -35,7 +36,7 @@ def __init__( ): super().__init__( synapse = synapse, request_proto = request_proto ) self.messages = request_proto.messages - self.formatted_messages = [ message for message in self.messages ] + self.formatted_messages = [ json.loads(message) for message in self.messages ] self.forward_callback = forward_callback def apply( self ):
Miner error on netuid 1 - string indices must be integers When running a miner on netuid 1, we get this output with the current text_prompting branch: ![image](https://user-images.githubusercontent.com/35969959/233508574-7502a388-a7cb-4748-b0f1-88bbcfbd3dfe.png) It should cause an error for any miner, since the bittensor.BasePromptingMiner (in this case in neurons/text/prompting/miners/pythia/neuron.py) previously received the "messages" argument in the form of a list of dictionaries, but now gets it in the form of a list of strings. To clarify, before, we got something like this: [{'role': 'system', 'content': '\nYou are designed to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics.\n'}, {'role': 'user', 'content': '\nAsk me a random question about anything. Make the question very domain specific. Do not include the answer in the question.\n'}] Now we get something like this: ['{"role": "system", "content": "\\nYou are designed to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics.\\n"}', '{"role": "user", "content": "What is the function of the Golgi apparatus in a eukaryotic cell?"}'] While making a more permanent fix to this issue, I can confirm that this quickfix works: adding this to the start of the forward() function in the miner, in this case in neurons/text/prompting/miners/pythia/neuron.py: ``` import json messages = [json.loads(item) for item in messages] ``` It takes all the strings in the messages variable and turns them into dictionaries.
2023-04-21T13:55:02
opentensor/bittensor
1,444
opentensor__bittensor-1444
[ "1442" ]
163e98153a1e5f6b6f40e9c8a9c1e7b9cd676d82
diff --git a/bittensor/__init__.py b/bittensor/__init__.py --- a/bittensor/__init__.py +++ b/bittensor/__init__.py @@ -27,7 +27,7 @@ nest_asyncio.apply() # Bittensor code and protocol version. -__version__ = '5.3.0' +__version__ = '5.3.1' version_split = __version__.split(".") __version_as_int__ = (100 * int(version_split[0])) + (10 * int(version_split[1])) + (1 * int(version_split[2])) __new_signature_version__ = 360 diff --git a/bittensor/_subtensor/subtensor_impl.py b/bittensor/_subtensor/subtensor_impl.py --- a/bittensor/_subtensor/subtensor_impl.py +++ b/bittensor/_subtensor/subtensor_impl.py @@ -736,7 +736,7 @@ def is_senate_member( hotkey_ss58: str, block: Optional[int] = None, ) -> bool: - senate_members = self.query_module(module="Senate", name="Members", block=block ).serialize() + senate_members = self.query_module(module="SenateMembers", name="Members", block=block ).serialize() return senate_members.count( hotkey_ss58 ) > 0 def get_vote_data(
diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,18 @@ +# The MIT License (MIT) +# Copyright © 2022 Yuma Rao +# Copyright © 2022-2023 Opentensor Foundation +# Copyright © 2023 Opentensor Technologies + +# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated +# documentation files (the “Software”), to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies or substantial portions of +# the Software. + +# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO +# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/tests/mocks/__init__.py b/tests/helpers/__init__.py similarity index 90% rename from tests/mocks/__init__.py rename to tests/helpers/__init__.py --- a/tests/mocks/__init__.py +++ b/tests/helpers/__init__.py @@ -15,5 +15,4 @@ # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. -from .wallet_mock import MockWallet as MockWallet -from .keyfile_mock import MockKeyfile as MockKeyfile \ No newline at end of file +from .helpers import _get_mock_coldkey, _get_mock_hotkey, _get_mock_keypair, _get_mock_wallet, CLOSE_IN_VALUE, MockConsole diff --git a/tests/helpers.py b/tests/helpers/helpers.py similarity index 73% rename from tests/helpers.py rename to tests/helpers/helpers.py --- a/tests/helpers.py +++ b/tests/helpers/helpers.py @@ -15,15 +15,26 @@ # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
-from typing import Union, Optional -from bittensor import Balance, NeuronInfo, axon_info, PrometheusInfo, Keypair, __ss58_format__ -from scalecodec import ss58_encode +from typing import Union +from bittensor import Balance, NeuronInfo, axon_info, PrometheusInfo, __ss58_format__ from rich.console import Console from rich.text import Text -from tests.mocks.wallet_mock import MockWallet +from bittensor_wallet.mock import MockWallet as _MockWallet, utils as _mock_wallet_utils + +_get_mock_coldkey = _mock_wallet_utils.get_mock_coldkey +_get_mock_hotkey = _mock_wallet_utils.get_mock_hotkey +_get_mock_keypair = _mock_wallet_utils.get_mock_keypair +_get_mock_wallet = _mock_wallet_utils.get_mock_wallet + + +def __mock_wallet_factory__(*args, **kwargs) -> _MockWallet: + """Returns a mock wallet object.""" + + mock_wallet = _get_mock_wallet() + + return mock_wallet -from Crypto.Hash import keccak class CLOSE_IN_VALUE(): value: Union[float, int, Balance] @@ -40,24 +51,7 @@ def __eq__(self, __o: Union[float, int, Balance]) -> bool: ((__o - self.tolerance) <= self.value and self.value <= (__o + self.tolerance)) -def get_mock_keypair( uid: int, test_name: Optional[str] = None ) -> Keypair: - """ - Returns a mock keypair from a uid and optional test_name. - If test_name is not provided, the uid is the only seed. - If test_name is provided, the uid is hashed with the test_name to create a unique seed for the test. - """ - if test_name is not None: - hashed_test_name: bytes = keccak.new(digest_bits=256, data=test_name.encode('utf-8')).digest() - hashed_test_name_as_int: int = int.from_bytes(hashed_test_name, byteorder='big', signed=False) - uid = uid + hashed_test_name_as_int - - return Keypair.create_from_seed( seed_hex = int.to_bytes(uid, 32, 'big', signed=False), ss58_format = __ss58_format__) - -def get_mock_hotkey( uid: int ) -> str: - return get_mock_keypair(uid).ss58_address -def get_mock_coldkey( uid: int ) -> str: - return get_mock_keypair(uid).ss58_address def get_mock_neuron(**kwargs) -> NeuronInfo: """ @@ -124,29 +118,11 @@ def get_mock_neuron(**kwargs) -> NeuronInfo: def get_mock_neuron_by_uid( uid: int, **kwargs ) -> NeuronInfo: return get_mock_neuron( uid = uid, - hotkey = get_mock_hotkey(uid), - coldkey = get_mock_coldkey(uid), + hotkey = _get_mock_hotkey(uid), + coldkey = _get_mock_coldkey(uid), **kwargs ) -def get_mock_wallet(coldkey: "Keypair" = None, hotkey: "Keypair" = None): - wallet = MockWallet( - name = 'mock_wallet', - hotkey = 'mock', - path = '/tmp/mock_wallet', - ) - - if not coldkey: - coldkey = Keypair.create_from_mnemonic(Keypair.generate_mnemonic()) - if not hotkey: - hotkey = Keypair.create_from_mnemonic(Keypair.generate_mnemonic()) - - wallet.set_coldkey(coldkey, encrypt=False, overwrite=True) - wallet.set_coldkeypub(coldkey, encrypt=False, overwrite=True) - wallet.set_hotkey(hotkey, encrypt=False, overwrite=True) - - return wallet - class MockStatus: def __enter__(self): return self diff --git a/tests/integration_tests/__init__.py b/tests/integration_tests/__init__.py new file mode 100644 diff --git a/tests/integration_tests/test_cli.py b/tests/integration_tests/test_cli.py --- a/tests/integration_tests/test_cli.py +++ b/tests/integration_tests/test_cli.py @@ -30,7 +30,7 @@ import bittensor from bittensor.utils.balance import Balance -from tests.helpers import MockConsole, get_mock_keypair, get_mock_wallet as generate_wallet +from tests.helpers import MockConsole, _get_mock_keypair, _get_mock_wallet as generate_wallet from bittensor._subtensor.subtensor_mock import 
MockSubtensor @@ -109,7 +109,7 @@ def test_overview(self): mock_hotkeys = ["hk0", "hk1", "hk2", "hk3", "hk4"] - mock_coldkey_kp = get_mock_keypair(0, self.id()) + mock_coldkey_kp = _get_mock_keypair(0, self.id()) mock_wallets = [ SimpleNamespace( @@ -117,7 +117,7 @@ def test_overview(self): coldkey=mock_coldkey_kp, coldkeypub=mock_coldkey_kp, hotkey_str=hk, - hotkey=get_mock_keypair(idx + 100, self.id()), + hotkey=_get_mock_keypair(idx + 100, self.id()), coldkeypub_file=MagicMock( exists_on_device=MagicMock(return_value=True) # Wallet exists ), @@ -216,7 +216,7 @@ def test_overview_not_in_first_subnet(self): mock_hotkeys = ["hk0", "hk1", "hk2", "hk3", "hk4"] - mock_coldkey_kp = get_mock_keypair(0, self.id()) + mock_coldkey_kp = _get_mock_keypair(0, self.id()) mock_wallets = [ SimpleNamespace( @@ -224,7 +224,7 @@ def test_overview_not_in_first_subnet(self): coldkey=mock_coldkey_kp, coldkeypub=mock_coldkey_kp, hotkey_str=hk, - hotkey=get_mock_keypair(idx + 100, self.id()), + hotkey=_get_mock_keypair(idx + 100, self.id()), coldkeypub_file=MagicMock( exists_on_device=MagicMock(return_value=True) # Wallet exists ), @@ -470,7 +470,7 @@ def test_unstake_with_specific_hotkeys(self): "hk2": bittensor.Balance.from_float(12.2), } - mock_coldkey_kp = get_mock_keypair(0, self.id()) + mock_coldkey_kp = _get_mock_keypair(0, self.id()) mock_wallets = [ SimpleNamespace( @@ -478,7 +478,7 @@ def test_unstake_with_specific_hotkeys(self): coldkey=mock_coldkey_kp, coldkeypub=mock_coldkey_kp, hotkey_str=hk, - hotkey=get_mock_keypair(idx + 100, self.id()), + hotkey=_get_mock_keypair(idx + 100, self.id()), ) for idx, hk in enumerate(config.hotkeys) ] @@ -547,7 +547,7 @@ def test_unstake_with_all_hotkeys(self): "hk2": bittensor.Balance.from_float(12.2), } - mock_coldkey_kp = get_mock_keypair(0, self.id()) + mock_coldkey_kp = _get_mock_keypair(0, self.id()) mock_wallets = [ SimpleNamespace( @@ -555,7 +555,7 @@ def test_unstake_with_all_hotkeys(self): coldkey=mock_coldkey_kp, coldkeypub=mock_coldkey_kp, hotkey_str=hk, - hotkey=get_mock_keypair(idx + 100, self.id()), + hotkey=_get_mock_keypair(idx + 100, self.id()), ) for idx, hk in enumerate(list(mock_stakes.keys())) ] @@ -625,7 +625,7 @@ def test_unstake_with_exclude_hotkeys_from_all(self): "hk2": bittensor.Balance.from_float(12.2), } - mock_coldkey_kp = get_mock_keypair(0, self.id()) + mock_coldkey_kp = _get_mock_keypair(0, self.id()) mock_wallets = [ SimpleNamespace( @@ -633,7 +633,7 @@ def test_unstake_with_exclude_hotkeys_from_all(self): coldkey=mock_coldkey_kp, coldkeypub=mock_coldkey_kp, hotkey_str=hk, - hotkey=get_mock_keypair(idx + 100, self.id()), + hotkey=_get_mock_keypair(idx + 100, self.id()), ) for idx, hk in enumerate(list(mock_stakes.keys())) ] @@ -710,7 +710,7 @@ def test_unstake_with_multiple_hotkeys_max_stake(self): "hk2": bittensor.Balance.from_float(12.2), } - mock_coldkey_kp = get_mock_keypair(0, self.id()) + mock_coldkey_kp = _get_mock_keypair(0, self.id()) mock_wallets = [ SimpleNamespace( @@ -718,7 +718,7 @@ def test_unstake_with_multiple_hotkeys_max_stake(self): coldkey=mock_coldkey_kp, coldkeypub=mock_coldkey_kp, hotkey_str=hk, - hotkey=get_mock_keypair(idx + 100, self.id()), + hotkey=_get_mock_keypair(idx + 100, self.id()), ) for idx, hk in enumerate(list(mock_stakes.keys())) ] @@ -792,7 +792,7 @@ def test_stake_with_specific_hotkeys(self): mock_balance = bittensor.Balance.from_float(22.2) - mock_coldkey_kp = get_mock_keypair(0, self.id()) + mock_coldkey_kp = _get_mock_keypair(0, self.id()) mock_wallets = [ SimpleNamespace( @@ -800,7 
+800,7 @@ def test_stake_with_specific_hotkeys(self): coldkey=mock_coldkey_kp, coldkeypub=mock_coldkey_kp, hotkey_str=hk, - hotkey=get_mock_keypair(idx + 100, self.id()), + hotkey=_get_mock_keypair(idx + 100, self.id()), ) for idx, hk in enumerate(config.hotkeys) ] @@ -867,7 +867,7 @@ def test_stake_with_all_hotkeys(self): mock_balance = bittensor.Balance.from_float(22.0) - mock_coldkey_kp = get_mock_keypair(0, self.id()) + mock_coldkey_kp = _get_mock_keypair(0, self.id()) mock_wallets = [ SimpleNamespace( @@ -875,7 +875,7 @@ def test_stake_with_all_hotkeys(self): coldkey=mock_coldkey_kp, coldkeypub=mock_coldkey_kp, hotkey_str=hk, - hotkey=get_mock_keypair(idx + 100, self.id()), + hotkey=_get_mock_keypair(idx + 100, self.id()), ) for idx, hk in enumerate(mock_hotkeys) ] @@ -965,7 +965,7 @@ def test_stake_with_exclude_hotkeys_from_all(self): mock_balance = bittensor.Balance.from_float(25.0) - mock_coldkey_kp = get_mock_keypair(0, self.id()) + mock_coldkey_kp = _get_mock_keypair(0, self.id()) mock_wallets = [ SimpleNamespace( @@ -973,7 +973,7 @@ def test_stake_with_exclude_hotkeys_from_all(self): coldkey=mock_coldkey_kp, coldkeypub=mock_coldkey_kp, hotkey_str=hk, - hotkey=get_mock_keypair(idx + 100, self.id()), + hotkey=_get_mock_keypair(idx + 100, self.id()), ) for idx, hk in enumerate(mock_hotkeys) ] @@ -1071,7 +1071,7 @@ def test_stake_with_multiple_hotkeys_max_stake(self): "hk2": bittensor.Balance.from_float(0.0), } - mock_coldkey_kp = get_mock_keypair(0, self.id()) + mock_coldkey_kp = _get_mock_keypair(0, self.id()) mock_wallets = [ SimpleNamespace( @@ -1079,7 +1079,7 @@ def test_stake_with_multiple_hotkeys_max_stake(self): coldkey=mock_coldkey_kp, coldkeypub=mock_coldkey_kp, hotkey_str=hk, - hotkey=get_mock_keypair(idx + 100, self.id()), + hotkey=_get_mock_keypair(idx + 100, self.id()), ) for idx, hk in enumerate(config.hotkeys) ] @@ -1180,7 +1180,7 @@ def test_stake_with_multiple_hotkeys_max_stake_not_enough_balance(self): 15.0 * 2 ) # Not enough for all hotkeys - mock_coldkey_kp = get_mock_keypair(0, self.id()) + mock_coldkey_kp = _get_mock_keypair(0, self.id()) mock_wallets = [ SimpleNamespace( @@ -1188,7 +1188,7 @@ def test_stake_with_multiple_hotkeys_max_stake_not_enough_balance(self): coldkey=mock_coldkey_kp, coldkeypub=mock_coldkey_kp, hotkey_str=hk, - hotkey=get_mock_keypair(idx + 100, self.id()), + hotkey=_get_mock_keypair(idx + 100, self.id()), ) for idx, hk in enumerate(config.hotkeys) ] @@ -1273,7 +1273,7 @@ def test_stake_with_single_hotkey_max_stake(self): mock_balance = bittensor.Balance.from_float(15.0 * 3) - mock_coldkey_kp = get_mock_keypair(0, self.id()) + mock_coldkey_kp = _get_mock_keypair(0, self.id()) mock_wallets = [ SimpleNamespace( @@ -1281,7 +1281,7 @@ def test_stake_with_single_hotkey_max_stake(self): coldkey=mock_coldkey_kp, coldkeypub=mock_coldkey_kp, hotkey_str=hk, - hotkey=get_mock_keypair(idx + 100, self.id()), + hotkey=_get_mock_keypair(idx + 100, self.id()), ) for idx, hk in enumerate(config.hotkeys) ] @@ -1361,7 +1361,7 @@ def test_stake_with_single_hotkey_max_stake_not_enough_balance(self): mock_balance = bittensor.Balance.from_float(1.0) # Not enough balance to do max - mock_coldkey_kp = get_mock_keypair(0, self.id()) + mock_coldkey_kp = _get_mock_keypair(0, self.id()) mock_wallets = [ SimpleNamespace( @@ -1369,7 +1369,7 @@ def test_stake_with_single_hotkey_max_stake_not_enough_balance(self): coldkey=mock_coldkey_kp, coldkeypub=mock_coldkey_kp, hotkey_str=hk, - hotkey=get_mock_keypair(idx + 100, self.id()), + hotkey=_get_mock_keypair(idx + 100, 
self.id()), ) for idx, hk in enumerate(config.hotkeys) ] @@ -1456,7 +1456,7 @@ def test_stake_with_single_hotkey_max_stake_enough_stake(self): "hk0": bittensor.Balance.from_float(config.max_stake * 2) } - mock_coldkey_kp = get_mock_keypair(0, self.id()) + mock_coldkey_kp = _get_mock_keypair(0, self.id()) mock_wallets = [ SimpleNamespace( @@ -1464,7 +1464,7 @@ def test_stake_with_single_hotkey_max_stake_enough_stake(self): coldkey=mock_coldkey_kp, coldkeypub=mock_coldkey_kp, hotkey_str=hk, - hotkey=get_mock_keypair(idx + 100, self.id()), + hotkey=_get_mock_keypair(idx + 100, self.id()), ) for idx, hk in enumerate(config.hotkeys) ] @@ -1552,10 +1552,10 @@ def test_nominate(self): mock_wallet = SimpleNamespace( name="w0", - coldkey=get_mock_keypair(0, self.id()), - coldkeypub=get_mock_keypair(0, self.id()), + coldkey=_get_mock_keypair(0, self.id()), + coldkeypub=_get_mock_keypair(0, self.id()), hotkey_str="hk0", - hotkey=get_mock_keypair(0 + 100, self.id()), + hotkey=_get_mock_keypair(0 + 100, self.id()), ) # Register mock wallet and give it a balance @@ -1615,10 +1615,10 @@ def test_delegate_stake(self): for idx_hk, hk in enumerate(list(mock_balances[wallet_name].keys())): wallet = SimpleNamespace( name=wallet_name, - coldkey=get_mock_keypair(idx, self.id()), - coldkeypub=get_mock_keypair(idx, self.id()), + coldkey=_get_mock_keypair(idx, self.id()), + coldkeypub=_get_mock_keypair(idx, self.id()), hotkey_str=hk, - hotkey=get_mock_keypair(idx * 100 + idx_hk, self.id()), + hotkey=_get_mock_keypair(idx * 100 + idx_hk, self.id()), ) mock_wallets.append(wallet) @@ -1704,10 +1704,10 @@ def test_undelegate_stake(self): for idx_hk, hk in enumerate(list(mock_balances[wallet_name].keys())): wallet = SimpleNamespace( name=wallet_name, - coldkey=get_mock_keypair(idx, self.id()), - coldkeypub=get_mock_keypair(idx, self.id()), + coldkey=_get_mock_keypair(idx, self.id()), + coldkeypub=_get_mock_keypair(idx, self.id()), hotkey_str=hk, - hotkey=get_mock_keypair(idx * 100 + idx_hk, self.id()), + hotkey=_get_mock_keypair(idx * 100 + idx_hk, self.id()), ) mock_wallets.append(wallet) @@ -1803,8 +1803,8 @@ def test_transfer(self): for idx, wallet_name in enumerate(list(mock_balances.keys())): wallet = SimpleNamespace( name=wallet_name, - coldkey=get_mock_keypair(idx, self.id()), - coldkeypub=get_mock_keypair(idx, self.id()), + coldkey=_get_mock_keypair(idx, self.id()), + coldkeypub=_get_mock_keypair(idx, self.id()), ) mock_wallets.append(wallet) @@ -1873,8 +1873,8 @@ def test_transfer_not_enough_balance(self): for idx, wallet_name in enumerate(list(mock_balances.keys())): wallet = SimpleNamespace( name=wallet_name, - coldkey=get_mock_keypair(idx, self.id()), - coldkeypub=get_mock_keypair(idx, self.id()), + coldkey=_get_mock_keypair(idx, self.id()), + coldkeypub=_get_mock_keypair(idx, self.id()), ) mock_wallets.append(wallet) @@ -1944,7 +1944,7 @@ def test_register(self): config.no_prompt = True mock_wallet = generate_wallet( - hotkey = get_mock_keypair( + hotkey = _get_mock_keypair( 100, self.id() ) ) @@ -1973,7 +1973,7 @@ def test_recycle_register(self): config.no_prompt = True mock_wallet = generate_wallet( - hotkey = get_mock_keypair( + hotkey = _get_mock_keypair( 100, self.id() ) ) @@ -2013,7 +2013,7 @@ def test_stake(self): subtensor = bittensor.subtensor(config) mock_wallet = generate_wallet( - hotkey = get_mock_keypair( + hotkey = _get_mock_keypair( 100, self.id() ) ) @@ -2061,8 +2061,8 @@ def register_mock_neuron( ) -> int: mock_nn.append( SimpleNamespace( - hotkey=get_mock_keypair(i + 100, 
self.id()).ss58_address, - coldkey=get_mock_keypair(i, self.id()).ss58_address, + hotkey=_get_mock_keypair(i + 100, self.id()).ss58_address, + coldkey=_get_mock_keypair(i, self.id()).ss58_address, balance=Balance.from_rao(random.randint(0, 2**45)).rao, stake=Balance.from_rao(random.randint(0, 2**45)).rao, ) @@ -2186,12 +2186,12 @@ def test_delegate(self): Test delegate add command """ mock_wallet = generate_wallet( - hotkey = get_mock_keypair( + hotkey = _get_mock_keypair( 100, self.id() ) ) delegate_wallet = generate_wallet( - hotkey = get_mock_keypair( + hotkey = _get_mock_keypair( 100 + 1, self.id() ) ) diff --git a/tests/integration_tests/test_cli_no_network.py b/tests/integration_tests/test_cli_no_network.py --- a/tests/integration_tests/test_cli_no_network.py +++ b/tests/integration_tests/test_cli_no_network.py @@ -24,7 +24,7 @@ from copy import deepcopy import re -from tests.helpers import get_mock_coldkey +from tests.helpers import _get_mock_coldkey import bittensor @@ -721,7 +721,7 @@ def test_delegate_prompt_wallet_name(self): base_args = [ 'delegate', '--all', - '--delegate_ss58key', get_mock_coldkey(0) + '--delegate_ss58key', _get_mock_coldkey(0) ] # Patch command to exit early with patch('bittensor._cli.commands.delegates.DelegateStakeCommand.run', return_value=None): @@ -762,7 +762,7 @@ def test_undelegate_prompt_wallet_name(self): base_args = [ 'undelegate', '--all', - '--delegate_ss58key', get_mock_coldkey(0) + '--delegate_ss58key', _get_mock_coldkey(0) ] # Patch command to exit early with patch('bittensor._cli.commands.delegates.DelegateUnstakeCommand.run', return_value=None): @@ -809,7 +809,7 @@ def test_delegate_prompt_hotkey(self): '--wallet.name', 'mock', ] - delegate_ss58 = get_mock_coldkey(0) + delegate_ss58 = _get_mock_coldkey(0) with patch('bittensor._cli.commands.delegates.show_delegates'): with patch('bittensor.Subtensor.get_delegates', return_value=[ bittensor.DelegateInfo( @@ -869,7 +869,7 @@ def test_undelegate_prompt_hotkey(self): '--wallet.name', 'mock', ] - delegate_ss58 = get_mock_coldkey(0) + delegate_ss58 = _get_mock_coldkey(0) with patch('bittensor._cli.commands.delegates.show_delegates'): with patch('bittensor.Subtensor.get_delegates', return_value=[ bittensor.DelegateInfo( diff --git a/tests/integration_tests/test_prometheus.py b/tests/integration_tests/test_prometheus.py --- a/tests/integration_tests/test_prometheus.py +++ b/tests/integration_tests/test_prometheus.py @@ -4,7 +4,7 @@ import unittest from unittest.mock import MagicMock, patch from bittensor._subtensor.subtensor_mock import MockSubtensor -from tests.helpers import get_mock_wallet +from tests.helpers import _get_mock_wallet _subtensor_mock: MockSubtensor = bittensor.subtensor( network = 'mock', _mock = True ) @@ -24,7 +24,7 @@ class TestPrometheus(unittest.TestCase): def setUp(self): self.subtensor = bittensor.subtensor(network = 'mock') - self.wallet = get_mock_wallet() + self.wallet = _get_mock_wallet() def test_init_prometheus_success(self): with patch.object(self.subtensor, '_do_serve_prometheus', return_value = (True, None)): diff --git a/tests/integration_tests/test_subtensor_integration.py b/tests/integration_tests/test_subtensor_integration.py --- a/tests/integration_tests/test_subtensor_integration.py +++ b/tests/integration_tests/test_subtensor_integration.py @@ -28,7 +28,7 @@ from bittensor.utils.balance import Balance from substrateinterface import Keypair from bittensor._subtensor.subtensor_mock import MockSubtensor -from tests.helpers import get_mock_hotkey, 
get_mock_coldkey, MockConsole, get_mock_keypair, get_mock_wallet +from tests.helpers import _get_mock_hotkey, _get_mock_coldkey, MockConsole, _get_mock_keypair, _get_mock_wallet class TestSubtensor(unittest.TestCase): _mock_console_patcher = None @@ -36,9 +36,9 @@ class TestSubtensor(unittest.TestCase): subtensor: MockSubtensor def setUp(self): - self.wallet = get_mock_wallet( - hotkey = get_mock_keypair(0, self.id()), - coldkey = get_mock_keypair(1, self.id()) + self.wallet = _get_mock_wallet( + hotkey = _get_mock_keypair(0, self.id()), + coldkey = _get_mock_keypair(1, self.id()) ) self.balance = Balance.from_tao(1000) self.mock_neuron = MagicMock() # NOTE: this might need more sophistication @@ -212,7 +212,7 @@ def test_stake_failed( self ): self.assertFalse(fail, msg="Stake should fail") def test_transfer( self ): - fake_coldkey = get_mock_coldkey(1) + fake_coldkey = _get_mock_coldkey(1) self.subtensor._do_transfer = MagicMock(return_value = (True, '0x', None)) self.subtensor.register = MagicMock(return_value = True) @@ -225,7 +225,7 @@ def test_transfer( self ): self.assertTrue(success, msg="Transfer should succeed") def test_transfer_inclusion( self ): - fake_coldkey = get_mock_coldkey(1) + fake_coldkey = _get_mock_coldkey(1) self.subtensor._do_transfer = MagicMock(return_value = (True, '0x', None)) self.subtensor.register = MagicMock(return_value = True) self.subtensor.get_neuron_for_pubkey_and_subnet = MagicMock(return_value = self.mock_neuron) @@ -239,7 +239,7 @@ def test_transfer_inclusion( self ): self.assertTrue(success, msg="Transfer should succeed") def test_transfer_failed(self ): - fake_coldkey = get_mock_coldkey(1) + fake_coldkey = _get_mock_coldkey(1) self.subtensor._do_transfer = MagicMock(return_value = (False, None, 'Mock failure message')) fail= self.subtensor.transfer(self.wallet, @@ -250,7 +250,7 @@ def test_transfer_failed(self ): self.assertFalse(fail, msg="Transfer should fail") def test_transfer_invalid_dest(self ): - fake_coldkey = get_mock_coldkey(1) + fake_coldkey = _get_mock_coldkey(1) fail = self.subtensor.transfer(self.wallet, fake_coldkey[:-1], # invalid dest @@ -260,7 +260,7 @@ def test_transfer_invalid_dest(self ): self.assertFalse(fail, msg="Transfer should fail because of invalid dest") def test_transfer_dest_as_bytes(self ): - fake_coldkey = get_mock_coldkey(1) + fake_coldkey = _get_mock_coldkey(1) self.subtensor._do_transfer = MagicMock(return_value = (True, '0x', None)) self.subtensor.register = MagicMock(return_value = True) @@ -319,7 +319,7 @@ def test_set_weights_failed( self ): assert fail == False def test_get_balance( self ): - fake_coldkey = get_mock_coldkey(0) + fake_coldkey = _get_mock_coldkey(0) balance= self.subtensor.get_balance(address=fake_coldkey) assert type(balance) == bittensor.utils.balance.Balance @@ -330,8 +330,8 @@ def test_get_balances( self ): assert type(balances[i]) == bittensor.utils.balance.Balance def test_get_uid_by_hotkey_on_subnet( self ): - mock_coldkey_kp = get_mock_keypair(0, self.id()) - mock_hotkey_kp = get_mock_keypair(100, self.id()) + mock_coldkey_kp = _get_mock_keypair(0, self.id()) + mock_hotkey_kp = _get_mock_keypair(100, self.id()) # Register on subnet 3 mock_uid = self.subtensor.force_register_neuron( @@ -345,8 +345,8 @@ def test_get_uid_by_hotkey_on_subnet( self ): self.assertEqual(uid, mock_uid, msg="get_uid_for_hotkey_on_subnet should return the correct uid") def test_is_hotkey_registered( self ): - mock_coldkey_kp = get_mock_keypair(0, self.id()) - mock_hotkey_kp = get_mock_keypair(100, self.id()) + 
mock_coldkey_kp = _get_mock_keypair(0, self.id()) + mock_hotkey_kp = _get_mock_keypair(100, self.id()) # Register on subnet 3 _ = self.subtensor.force_register_neuron( @@ -359,7 +359,7 @@ def test_is_hotkey_registered( self ): self.assertTrue(registered, msg="Hotkey should be registered") def test_is_hotkey_registered_not_registered( self ): - mock_hotkey_kp = get_mock_keypair(100, self.id()) + mock_hotkey_kp = _get_mock_keypair(100, self.id()) # Do not register on subnet 3 @@ -381,9 +381,9 @@ def test_registration_multiprocessed_already_registered( self ): # patch time queue get to raise Empty exception with patch('multiprocessing.queues.Queue.get_nowait', side_effect=QueueEmpty) as mock_queue_get_nowait: - wallet = get_mock_wallet( - hotkey = get_mock_keypair(0, self.id()), - coldkey = get_mock_keypair(1, self.id()) + wallet = _get_mock_wallet( + hotkey = _get_mock_keypair(0, self.id()), + coldkey = _get_mock_keypair(1, self.id()) ) self.subtensor.is_hotkey_registered = MagicMock( side_effect=is_registered_return_values ) @@ -414,9 +414,9 @@ def is_registered_side_effect(*args, **kwargs): with patch('bittensor.Subtensor.get_neuron_for_pubkey_and_subnet', return_value = bittensor.NeuronInfo._null_neuron()): with patch('bittensor.Subtensor.difficulty'): - wallet = get_mock_wallet( - hotkey = get_mock_keypair(0, self.id()), - coldkey = get_mock_keypair(1, self.id()) + wallet = _get_mock_wallet( + hotkey = _get_mock_keypair(0, self.id()), + coldkey = _get_mock_keypair(1, self.id()) ) self.subtensor.is_hotkey_registered = MagicMock(side_effect=is_registered_side_effect) @@ -435,9 +435,9 @@ def test_registration_failed( self ): mock_neuron.is_null = True with patch('bittensor._subtensor.extrinsics.registration.create_pow', return_value=None) as mock_create_pow: - wallet = get_mock_wallet( - hotkey = get_mock_keypair(0, self.id()), - coldkey = get_mock_keypair(1, self.id()) + wallet = _get_mock_wallet( + hotkey = _get_mock_keypair(0, self.id()), + coldkey = _get_mock_keypair(1, self.id()) ) self.subtensor.is_hotkey_registered = MagicMock(side_effect=is_registered_return_values) diff --git a/tests/mocks/keyfile_mock.py b/tests/mocks/keyfile_mock.py deleted file mode 100644 --- a/tests/mocks/keyfile_mock.py +++ /dev/null @@ -1,82 +0,0 @@ -# The MIT License (MIT) - -# Copyright © 2021 Yuma Rao -# Copyright © 2022 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -from bittensor_wallet import serialized_keypair_to_keyfile_data, Keyfile -from bittensor_wallet import Keypair - -class MockKeyfile( Keyfile ): - """ Defines an interface to a mocked keyfile object (nothing is created on device) keypair is treated as non encrypted and the data is just the string version. - """ - def __init__( self, path: str ): - super().__init__( path ) - - self._mock_keypair = Keypair.create_from_mnemonic( mnemonic = 'arrive produce someone view end scout bargain coil slight festival excess struggle' ) - self._mock_data = serialized_keypair_to_keyfile_data( self._mock_keypair ) - - def __str__(self): - if not self.exists_on_device(): - return "Keyfile (empty, {})>".format( self.path ) - if self.is_encrypted(): - return "Keyfile (encrypted, {})>".format( self.path ) - else: - return "Keyfile (decrypted, {})>".format( self.path ) - - def __repr__(self): - return self.__str__() - - @property - def keypair( self ) -> 'Keypair': - return self._mock_keypair - - @property - def data( self ) -> bytes: - return bytes(self._mock_data) - - @property - def keyfile_data( self ) -> bytes: - return bytes( self._mock_data) - - def set_keypair ( self, keypair: 'Keypair', encrypt: bool = True, overwrite: bool = False, password:str = None): - self._mock_keypair = keypair - self._mock_data = serialized_keypair_to_keyfile_data( self._mock_keypair ) - - def get_keypair(self, password: str = None) -> 'Keypair': - return self._mock_keypair - - def make_dirs( self ): - return - - def exists_on_device( self ) -> bool: - return True - - def is_readable( self ) -> bool: - return True - - def is_writable( self ) -> bool: - return True - - def is_encrypted ( self ) -> bool: - return False - - def encrypt( self, password: str = None): - raise ValueError('Cannot encrypt a mock keyfile') - - def decrypt( self, password: str = None): - return diff --git a/tests/mocks/wallet_mock.py b/tests/mocks/wallet_mock.py deleted file mode 100644 --- a/tests/mocks/wallet_mock.py +++ /dev/null @@ -1,79 +0,0 @@ -# The MIT License (MIT) - -# Copyright © 2021 Yuma Rao -# Copyright © 2022 Opentensor Foundation -# Copyright © 2023 Opentensor Technologies - -# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -# documentation files (the “Software”), to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, -# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in all copies or substantial portions of -# the Software. - -# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO -# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. 
- -import os -import bittensor -import bittensor_wallet - -from .keyfile_mock import MockKeyfile - -class MockWallet(bittensor_wallet.Wallet): - """ - Mocked Version of the bittensor wallet class, meant to be used for testing - """ - def __init__( - self, - **kwargs, - ): - r""" Init bittensor wallet object containing a hot and coldkey. - Args: - _mock (required=True, default=False): - If true creates a mock wallet with random keys. - """ - super().__init__(**kwargs) - # For mocking. - self._is_mock = True - self._mocked_coldkey_keyfile = None - self._mocked_hotkey_keyfile = None - - print("---- MOCKED WALLET INITIALIZED- ---") - - @property - def hotkey_file(self) -> 'bittensor_wallet.Keyfile': - if self._is_mock: - if self._mocked_hotkey_keyfile == None: - self._mocked_hotkey_keyfile = MockKeyfile(path='MockedHotkey') - return self._mocked_hotkey_keyfile - else: - wallet_path = os.path.expanduser(os.path.join(self.path, self.name)) - hotkey_path = os.path.join(wallet_path, "hotkeys", self.hotkey_str) - return bittensor.keyfile( path = hotkey_path ) - - @property - def coldkey_file(self) -> 'bittensor_wallet.Keyfile': - if self._is_mock: - if self._mocked_coldkey_keyfile == None: - self._mocked_coldkey_keyfile = MockKeyfile(path='MockedColdkey') - return self._mocked_coldkey_keyfile - else: - wallet_path = os.path.expanduser(os.path.join(self.path, self.name)) - coldkey_path = os.path.join(wallet_path, "coldkey") - return bittensor.keyfile( path = coldkey_path ) - - @property - def coldkeypub_file(self) -> 'bittensor_wallet.Keyfile': - if self._is_mock: - if self._mocked_coldkey_keyfile == None: - self._mocked_coldkey_keyfile = MockKeyfile(path='MockedColdkeyPub') - return self._mocked_coldkey_keyfile - else: - wallet_path = os.path.expanduser(os.path.join(self.path, self.name)) - coldkeypub_path = os.path.join(wallet_path, "coldkeypub.txt") - return bittensor_wallet.Keyfile( path = coldkeypub_path ) \ No newline at end of file diff --git a/tests/unit_tests/__init__.py b/tests/unit_tests/__init__.py new file mode 100644 diff --git a/tests/unit_tests/bittensor_tests/__init__.py b/tests/unit_tests/bittensor_tests/__init__.py new file mode 100644 diff --git a/tests/unit_tests/bittensor_tests/test_axon.py b/tests/unit_tests/bittensor_tests/test_axon.py --- a/tests/unit_tests/bittensor_tests/test_axon.py +++ b/tests/unit_tests/bittensor_tests/test_axon.py @@ -26,7 +26,7 @@ import bittensor from bittensor.utils.test_utils import get_random_unused_port -from tests.helpers import get_mock_wallet, get_mock_keypair +from tests.helpers import _get_mock_wallet, _get_mock_keypair def gen_nonce(): return f"{time.monotonic_ns()}" @@ -54,16 +54,16 @@ def is_port_in_use(port): class TestAxon(unittest.TestCase): @classmethod def setUpClass(cls) -> None: - cls.wallet = wallet = get_mock_wallet( - coldkey = get_mock_keypair(0, cls.__name__), - hotkey= get_mock_keypair(100 + 0, cls.__name__), + cls.wallet = wallet = _get_mock_wallet( + coldkey = _get_mock_keypair(0, cls.__name__), + hotkey= _get_mock_keypair(100 + 0, cls.__name__), ) cls.axon = bittensor.axon( wallet = wallet, metagraph = None ) - cls.sender_wallet = get_mock_wallet( - coldkey = get_mock_keypair(1, cls.__name__), - hotkey= get_mock_keypair(100 + 1, cls.__name__), + cls.sender_wallet = _get_mock_wallet( + coldkey = _get_mock_keypair(1, cls.__name__), + hotkey= _get_mock_keypair(100 + 1, cls.__name__), ) diff --git a/tests/unit_tests/bittensor_tests/test_balance.py b/tests/unit_tests/bittensor_tests/test_balance.py --- 
a/tests/unit_tests/bittensor_tests/test_balance.py +++ b/tests/unit_tests/bittensor_tests/test_balance.py @@ -20,10 +20,11 @@ import pytest from bittensor import Balance -from tests.helpers import CLOSE_IN_VALUE from hypothesis import given from hypothesis import strategies as st +from tests.helpers import CLOSE_IN_VALUE + """ Test the Balance class """ diff --git a/tests/unit_tests/bittensor_tests/utils/__init__.py b/tests/unit_tests/bittensor_tests/utils/__init__.py new file mode 100644 diff --git a/tests/unit_tests/bittensor_tests/utils/test_utils.py b/tests/unit_tests/bittensor_tests/utils/test_utils.py --- a/tests/unit_tests/bittensor_tests/utils/test_utils.py +++ b/tests/unit_tests/bittensor_tests/utils/test_utils.py @@ -26,8 +26,7 @@ from bittensor.utils.registration import _CUDASolver, _SolverBase from bittensor._subtensor.subtensor_mock import MockSubtensor -from tests.mocks.wallet_mock import MockWallet -from tests.helpers import get_mock_wallet as generate_wallet, get_mock_keypair +from tests.helpers import _get_mock_wallet as _generate_wallet, _get_mock_keypair @fixture(scope="function") @@ -665,8 +664,8 @@ def _do_setup_subnet(cls): ) def test_wallet_reregister_reregister_false(self): - mock_wallet = generate_wallet( - hotkey = get_mock_keypair( + mock_wallet = _generate_wallet( + hotkey = _get_mock_keypair( 100, self.id() ) ) @@ -686,8 +685,8 @@ class MockException(Exception): mock_register.assert_not_called() # should not call register def test_wallet_reregister_reregister_false_and_registered_already(self): - mock_wallet = generate_wallet( - hotkey = get_mock_keypair( + mock_wallet = _generate_wallet( + hotkey = _get_mock_keypair( 100, self.id() ) ) @@ -716,8 +715,8 @@ class MockException(Exception): mock_register.assert_not_called() # should not call register def test_wallet_reregister_reregister_true_and_registered_already(self): - mock_wallet = generate_wallet( - hotkey = get_mock_keypair( + mock_wallet = _generate_wallet( + hotkey = _get_mock_keypair( 100, self.id() ) ) @@ -747,8 +746,8 @@ class MockException(Exception): def test_wallet_reregister_no_params(self): - mock_wallet = generate_wallet( - hotkey = get_mock_keypair( + mock_wallet = _generate_wallet( + hotkey = _get_mock_keypair( 100, self.id() ) ) @@ -770,8 +769,8 @@ class MockException(Exception): mock_register.assert_called_once() # should call register once def test_wallet_reregister_use_cuda_flag_true(self): - mock_wallet = generate_wallet( - hotkey = get_mock_keypair( + mock_wallet = _generate_wallet( + hotkey = _get_mock_keypair( 100, self.id() ) ) @@ -799,8 +798,8 @@ class MockException(Exception): self.assertEqual(kwargs['cuda'], True) def test_wallet_reregister_use_cuda_flag_false(self): - mock_wallet = generate_wallet( - hotkey = get_mock_keypair( + mock_wallet = _generate_wallet( + hotkey = _get_mock_keypair( 100, self.id() ) ) @@ -827,8 +826,8 @@ class MockException(Exception): self.assertEqual(kwargs['cuda'], False) def test_wallet_reregister_cuda_arg_not_specified_should_be_false(self): - mock_wallet = generate_wallet( - hotkey = get_mock_keypair( + mock_wallet = _generate_wallet( + hotkey = _get_mock_keypair( 100, self.id() ) )
Discord invite link invalid. Please update the Discord invite link.
2023-07-06T04:18:42
opentensor/bittensor
1,834
opentensor__bittensor-1834
[ "1833" ]
752c1ad1dadb34b13f6342a541bfff1900ce1577
diff --git a/bittensor/subtensor.py b/bittensor/subtensor.py --- a/bittensor/subtensor.py +++ b/bittensor/subtensor.py @@ -1,6 +1,7 @@ # The MIT License (MIT) # Copyright © 2021 Yuma Rao # Copyright © 2023 Opentensor Foundation +import functools # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the “Software”), to deal in the Software without restriction, including without limitation @@ -92,6 +93,21 @@ T = TypeVar("T") +####### +# Monkey patch in caching the get_decoder_class method +####### +if hasattr(RuntimeConfiguration, "get_decoder_class"): + original_get_decoder_class = RuntimeConfiguration.get_decoder_class + + @functools.lru_cache(maxsize=None) + def cached_get_decoder_class(self, type_string): + return original_get_decoder_class(self, type_string) + + RuntimeConfiguration.get_decoder_class = cached_get_decoder_class + +####### + + class ParamWithTypes(TypedDict): name: str # Name of the parameter. type: str # ScaleType string of the parameter.
Cache get_decoder_class Currently, a large portion of the time for RPC calls (29,000+ calls for get_delegates) is taken up by decoding. Profiling this shows that the vast majority of this decoding time is actually caused by calls to the `scalecodec.base.RuntimeConfiguration.get_decoder_class` method. Because of the fairly limited number of decoder classes, we should be able to cache this with `functools.cache` to see large speed improvements.
Because the decoding is being done by a third-party library (scalecodec), we will have to monkey-patch in a `functools.cache` call like so:

```python
import functools
from scalecodec import base as scalecodec_base
import bittensor as bt

original_get_decoder_class = scalecodec_base.RuntimeConfiguration.get_decoder_class

@functools.cache
def patched_get_decoder_class(self, type_string):
    return original_get_decoder_class(self, type_string)

scalecodec_base.RuntimeConfiguration.get_decoder_class = patched_get_decoder_class

sub = bt.subtensor("finney")
sub.get_delegates()
```

With this, we can see a reduction of calls (for `get_delegates`) of the `get_decoder_class` method from 94,542 to 332. In real-world performance, we see that the entire execution time for this script improves by ~48% with the patch implemented. Note that this only involves running five times each, so results may vary with times of day, ping, etc.:

| Original | Patched | Run |
| -------- | ------- | --- |
| 3.754099130630493| 2.3359689712524414| 0 |
| 4.5155346393585205| 2.0710458755493164| 1 |
| 4.193195104598999| 2.0840811729431152| 2 |
| 4.192205905914307| 2.0497679710388184| 3 |
| 3.9943289756774902| 2.2274067401885986| 4 |
| 4.129872751235962 | 2.153654146194458 | average |

I believe implementing this in the code base will drastically reduce overall decode time.
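For reference, the variant that actually landed in `bittensor/subtensor.py` (see the patch above) uses `functools.lru_cache(maxsize=None)` behind a `hasattr` guard rather than `functools.cache`, which only exists on Python 3.9+; a condensed, self-contained sketch:

```python
import functools

from scalecodec.base import RuntimeConfiguration

# Cache decoder-class lookups: the same type strings are resolved tens of
# thousands of times while decoding a single large RPC response.
if hasattr(RuntimeConfiguration, "get_decoder_class"):
    _original_get_decoder_class = RuntimeConfiguration.get_decoder_class

    @functools.lru_cache(maxsize=None)
    def _cached_get_decoder_class(self, type_string):
        return _original_get_decoder_class(self, type_string)

    RuntimeConfiguration.get_decoder_class = _cached_get_decoder_class
```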
2024-05-01T20:57:12
opentensor/bittensor
1,871
opentensor__bittensor-1871
[ "1867" ]
fd7faa565078267e50e4ae1db4e23668ccf62503
diff --git a/bittensor/subtensor.py b/bittensor/subtensor.py --- a/bittensor/subtensor.py +++ b/bittensor/subtensor.py @@ -4415,7 +4415,7 @@ def make_substrate_call_with_retry(): result = make_substrate_call_with_retry() except scalecodec.exceptions.RemainingScaleBytesNotEmptyException: bittensor.logging.error( - "Your wallet it legacy formatted, you need to run btcli stake --ammount 0 to reformat it." + "Received a corrupted message. This likely points to an error with the network or subnet." ) return Balance(1000) return Balance(result.value["data"]["free"])
diff --git a/tests/unit_tests/test_subtensor.py b/tests/unit_tests/test_subtensor.py --- a/tests/unit_tests/test_subtensor.py +++ b/tests/unit_tests/test_subtensor.py @@ -177,7 +177,7 @@ def test_stake_multiple(): # args, kwargs _, kwargs = mock_do_stake.call_args - assert kwargs["ammount"] == pytest.approx( + assert kwargs["amount"] == pytest.approx( mock_amount.rao, rel=1e9 ) # delta of 1.0 TAO
Typo and bad error message ![Image](https://github.com/opentensor/bittensor/assets/37844818/0decf37d-3710-4996-b473-cb3f591fea3c) This is the error given from a `scalecodec.exceptions.RemainingScaleBytesNotEmptyException` in `subtensor.get_balance`, but I don't think this is necessarily the case. Regardless, the typo needs to be fixed.
2024-05-13T18:23:39
opentensor/bittensor
1,974
opentensor__bittensor-1974
[ "1839" ]
7e8cbc6c67f4a82c85cac86580249babec155596
diff --git a/bittensor/__init__.py b/bittensor/__init__.py --- a/bittensor/__init__.py +++ b/bittensor/__init__.py @@ -16,15 +16,28 @@ # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. +import os +import warnings from rich.console import Console from rich.traceback import install -# Install and apply nest asyncio to allow the async functions -# to run in a .ipynb -import nest_asyncio -nest_asyncio.apply() +if (NEST_ASYNCIO_ENV := os.getenv("NEST_ASYNCIO")) in ("1", None): + if NEST_ASYNCIO_ENV is None: + warnings.warn( + "NEST_ASYNCIO implicitly set to '1'. In the future, the default value will be '0'." + "If you use `nest_asyncio` make sure to add it explicitly to your project dependencies," + "as it will be removed from `bittensor` package dependencies in the future." + "To silence this warning, explicitly set the environment variable, e.g. `export NEST_ASYNCIO=0`.", + DeprecationWarning, + ) + # Install and apply nest asyncio to allow the async functions + # to run in a .ipynb + import nest_asyncio + + nest_asyncio.apply() + # Bittensor code and protocol version. __version__ = "7.0.0"
Remove nest_asyncio from bittensor to allow uvloop support ### Is your feature request related to a problem? Please describe. Uvloop, which provides superior speed, does not allow loop nesting. It is also the case that uvloop is pulled in by popular packages, which [forces some subnets to develop hacks to combat this](https://github.com/synapsec-ai/llm-defender-subnet/blob/6c37925c4f34a298607c97dfceebcc01fb74d562/scripts/run_neuron.sh#L140-L146). And perhaps more importantly, https://github.com/erdewit/nest_asyncio seems to have been abandoned. ### Describe the solution you'd like Remove nest_asyncio, and let bittensor users decide which asyncio loop they want to run. Perhaps even suggest (not mandate) running uvloop, since it consistently shows better results in benchmarks than the CPython stdlib asyncio loop. It seems there was already some attempt at this in the past, https://github.com/opentensor/bittensor/pull/1501, for some reason (?) ### Describe alternatives you've considered _No response_ ### Additional context _No response_
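For illustration only, a minimal sketch of how a user could combine the NEST_ASYNCIO opt-out added by the patch above with uvloop; uvloop is not a bittensor dependency here, and the async workload is a placeholder:

```python
import os

# Disable the nest_asyncio monkey patch before bittensor is imported
# (the NEST_ASYNCIO environment variable comes from the patch above).
os.environ["NEST_ASYNCIO"] = "0"

import asyncio

import uvloop  # third-party, chosen by the user, not by bittensor
import bittensor as bt

# uvloop refuses nested event loops, which is why the opt-out matters.
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

async def main():
    await asyncio.sleep(0)  # placeholder async workload

asyncio.run(main())
```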
2024-06-04T13:05:03
PaddlePaddle/PaddleOCR
66
PaddlePaddle__PaddleOCR-66
[ "64" ]
bb1a54ef97100db185cfea0b893b89597c9848f5
diff --git a/tools/export_model.py b/tools/export_model.py --- a/tools/export_model.py +++ b/tools/export_model.py @@ -31,7 +31,7 @@ def set_paddle_flags(**kwargs): # NOTE(paddle-dev): All of these flags should be # set before `import paddle`. Otherwise, it would -# not take any effect. +# not take any effect. set_paddle_flags( FLAGS_eager_delete_tensor_gb=0, # enable GC to save memory ) @@ -52,7 +52,7 @@ def main(): # check if set use_gpu=True in paddlepaddle cpu version use_gpu = config['Global']['use_gpu'] - program.check_gpu(True) + program.check_gpu(use_gpu) alg = config['Global']['algorithm'] assert alg in ['EAST', 'DB', 'Rosetta', 'CRNN', 'STARNet', 'RARE']
Converting the detection model to an inference model fails even with 'use_gpu': False. Hello, when converting the detection model to an inference model, I have already changed 'use_gpu': False in det_mv3_db.yml, but the following error is still raised: python tools/export_model.py -c configs/det/det_mv3_db.yml -o Global.checkpoints=./ch_lite/det_mv3_db/best_accuracy Global.save_inference_dir=./inference_model/det_db/ 2020-05-19 10:29:59,237-INFO: {'Global': {'algorithm': 'DB', 'use_gpu': False, 'epoch_num': 1200, 'log_smooth_window': 20, 'print_batch_step': 2, 'save_model_dir': './output/det_db/', 'save_epoch_step': 200, 'eval_batch_step': 5000, 'train_batch_size_per_card': 16, 'test_batch_size_per_card': 16, 'image_shape': [3, 640, 640], 'reader_yml': './configs/det/det_db_icdar15_reader.yml', 'pretrain_weights': './pretrain_models/MobileNetV3_large_x0_5_pretrained/', 'checkpoints': './ch_lite/det_mv3_db/best_accuracy', 'save_res_path': './output/det_db/predicts_db.txt', 'save_inference_dir': './inference_model/det_db/'}, 'Architecture': {'function': 'ppocr.modeling.architectures.det_model,DetModel'}, 'Backbone': {'function': 'ppocr.modeling.backbones.det_mobilenet_v3,MobileNetV3', 'scale': 0.5, 'model_name': 'large'}, 'Head': {'function': 'ppocr.modeling.heads.det_db_head,DBHead', 'model_name': 'large', 'k': 50, 'inner_channels': 96, 'out_channels': 2}, 'Loss': {'function': 'ppocr.modeling.losses.det_db_loss,DBLoss', 'balance_loss': True, 'main_loss_type': 'DiceLoss', 'alpha': 5, 'beta': 10, 'ohem_ratio': 3}, 'Optimizer': {'function': 'ppocr.optimizer,AdamDecay', 'base_lr': 0.001, 'beta1': 0.9, 'beta2': 0.999}, 'PostProcess': {'function': 'ppocr.postprocess.db_postprocess,DBPostProcess', 'thresh': 0.3, 'box_thresh': 0.7, 'max_candidates': 1000, 'unclip_ratio': 1.5}, 'TrainReader': {'reader_function': 'ppocr.data.det.dataset_traversal,TrainReader', 'process_function': 'ppocr.data.det.db_process,DBProcessTrain', 'num_workers': 8, 'img_set_dir': './train_data/icdar2015/text_localization/', 'label_file_path': './train_data/icdar2015/text_localization/train_icdar2015_label.txt'}, 'EvalReader': {'reader_function': 'ppocr.data.det.dataset_traversal,EvalTestReader', 'process_function': 'ppocr.data.det.db_process,DBProcessTest', 'img_set_dir': './train_data/icdar2015/text_localization/', 'label_file_path': './train_data/icdar2015/text_localization/test_icdar2015_label.txt', 'test_image_shape': [736, 1280]}, 'TestReader': {'reader_function': 'ppocr.data.det.dataset_traversal,EvalTestReader', 'process_function': 'ppocr.data.det.db_process,DBProcessTest', 'single_img_path': None, 'img_set_dir': './train_data/icdar2015/text_localization/', 'label_file_path': './train_data/icdar2015/text_localization/test_icdar2015_label.txt', 'test_image_shape': [736, 1280], 'do_eval': True}} 2020-05-19 10:29:59,238-ERROR: Config use_gpu cannot be set as true while you are using paddlepaddle cpu version ! Please try: 1. Install paddlepaddle-gpu to run model on GPU 2. Set use_gpu as false in config file to run model on CPU
Hello, from the log it looks like use_gpu is not taking effect. You can manually set it around line 62 of tools/export_model.py ``` use_gpu=False # newly added place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() ``` and then try whether it runs normally.
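For context, a small self-contained sketch of what the merged fix restores in `tools/export_model.py`: the GPU check has to receive the flag read from the YAML instead of a hard-coded `True`. `check_gpu` below is only a stand-in for `tools/program.check_gpu`, using the Paddle 1.x `fluid` API of that era:

```python
import paddle.fluid as fluid

def check_gpu(use_gpu):
    # Stand-in for tools/program.check_gpu: only complain when the config
    # asks for GPU but the installed paddlepaddle build is CPU-only.
    if use_gpu and not fluid.is_compiled_with_cuda():
        raise RuntimeError(
            "use_gpu is true but this paddlepaddle build has no CUDA support")

use_gpu = False          # value read from det_mv3_db.yml by the real script
check_gpu(use_gpu)       # the buggy code effectively called check_gpu(True)
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
```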
2020-05-19T04:01:52
PaddlePaddle/PaddleOCR
1,972
PaddlePaddle__PaddleOCR-1972
[ "1932" ]
6b73d8ed2f4becd046edeebdb33f26cfb42be257
diff --git a/tools/program.py b/tools/program.py --- a/tools/program.py +++ b/tools/program.py @@ -177,6 +177,8 @@ def train(config, model_average = False model.train() + use_srn = config['Architecture']['algorithm'] == "SRN" + if 'start_epoch' in best_model_dict: start_epoch = best_model_dict['start_epoch'] else: @@ -195,7 +197,7 @@ def train(config, break lr = optimizer.get_lr() images = batch[0] - if config['Architecture']['algorithm'] == "SRN": + if use_srn: others = batch[-4:] preds = model(images, others) model_average = True @@ -251,8 +253,12 @@ def train(config, min_average_window=10000, max_average_window=15625) Model_Average.apply() - cur_metric = eval(model, valid_dataloader, post_process_class, - eval_class) + cur_metric = eval( + model, + valid_dataloader, + post_process_class, + eval_class, + use_srn=use_srn) cur_metric_str = 'cur metric, {}'.format(', '.join( ['{}: {}'.format(k, v) for k, v in cur_metric.items()])) logger.info(cur_metric_str) @@ -316,7 +322,8 @@ def train(config, return -def eval(model, valid_dataloader, post_process_class, eval_class): +def eval(model, valid_dataloader, post_process_class, eval_class, + use_srn=False): model.eval() with paddle.no_grad(): total_frame = 0.0 @@ -327,7 +334,8 @@ def eval(model, valid_dataloader, post_process_class, eval_class): break images = batch[0] start = time.time() - if "SRN" in str(model.head): + + if use_srn: others = batch[-4:] preds = model(images, others) else:
distributed training AttributeError: DataParallel' object has no attribute 'head' eval model:: 0%| | 0/74 [00:00<?, ?it/s]Traceback (most recent call last): File "tools/train.py", line 115, in <module> main(config, device, logger, vdl_writer) File "tools/train.py", line 92, in main eval_class, pre_best_model_dict, logger, vdl_writer) File "/paddle/PaddleOCR/tools/program.py", line 258, in train eval_class) File "/paddle/PaddleOCR/tools/program.py", line 334, in eval if "SRN" in str(model.head): File "/usr/local/python3.5.1/lib/python3.5/site-packages/paddle/fluid/dygraph/layers.py", line 1039, in __getattr__ return object.__getattribute__(self, name) AttributeError: 'DataParallel' object has no attribute 'head' command: python3 -u -m paddle.distributed.launch --gpus '4,5,6,7' tools/train.py -c configs/rec/ch_ppocr_v2.0/rec_chinese_lite_train_v2.0.yml
We fixed the problem, please update your dygraph branch code.
2021-02-07T07:34:38
PaddlePaddle/PaddleOCR
2,014
PaddlePaddle__PaddleOCR-2014
[ "2013", "2013" ]
d91c3e7c6b851f1c3d3af95c8f9c3e7276200ee7
diff --git a/ppocr/data/imaug/make_shrink_map.py b/ppocr/data/imaug/make_shrink_map.py --- a/ppocr/data/imaug/make_shrink_map.py +++ b/ppocr/data/imaug/make_shrink_map.py @@ -44,20 +44,33 @@ def __call__(self, data): ignore_tags[i] = True else: polygon_shape = Polygon(polygon) - distance = polygon_shape.area * ( - 1 - np.power(self.shrink_ratio, 2)) / polygon_shape.length - subject = [tuple(l) for l in text_polys[i]] + subject = [tuple(l) for l in polygon] padding = pyclipper.PyclipperOffset() padding.AddPath(subject, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON) - shrinked = padding.Execute(-distance) + shrinked = [] + + # Increase the shrink ratio every time we get multiple polygon returned back + possible_ratios = np.arange(self.shrink_ratio, 1, self.shrink_ratio) + np.append(possible_ratios, 1) + # print(possible_ratios) + for ratio in possible_ratios: + # print(f"Change shrink ratio to {ratio}") + distance = polygon_shape.area * ( + 1 - np.power(ratio, 2)) / polygon_shape.length + shrinked = padding.Execute(-distance) + if len(shrinked) == 1: + break + if shrinked == []: cv2.fillPoly(mask, polygon.astype(np.int32)[np.newaxis, :, :], 0) ignore_tags[i] = True continue - shrinked = np.array(shrinked[0]).reshape(-1, 2) - cv2.fillPoly(gt, [shrinked.astype(np.int32)], 1) + + for each_shirnk in shrinked: + shirnk = np.array(each_shirnk).reshape(-1, 2) + cv2.fillPoly(gt, [shirnk.astype(np.int32)], 1) # cv2.fillPoly(gt[0], [shrinked.astype(np.int32)], 1) data['shrink_map'] = gt @@ -91,4 +104,4 @@ def polygon_area(self, polygon): edge += (polygon[next_index, 0] - polygon[i, 0]) * ( polygon[next_index, 1] - polygon[i, 1]) - return edge / 2. + return edge / 2. \ No newline at end of file
Shrink map generation at training for DB is wrong for some polygon Hi. Thanks for the great works. After trying the DB training code, I've found out that the current shrink map generation at training for DB is wrong for some polygon. For example, this polygon: ``` [[370, 111], [360, 111], [354, 113], [337, 113], [336, 109], [281, 109], [278, 111], [269, 111], [260, 108], [243, 108], [241, 108], [181, 108], [177, 111], [169, 111], [162, 112], [145, 112], [144, 108], [84, 108], [83, 108], [9, 108], [9, 117], [83, 117], [84, 117], [144, 117], [145, 119], [162, 119], [169, 117], [177, 117], [181, 118], [241, 118], [243, 114], [260, 114], [269, 118], [278, 118], [281, 118], [336, 118], [337, 119], [354, 119], [360, 118], [370, 118]] ``` ![lol2](https://user-images.githubusercontent.com/26834698/108241717-b582d380-717e-11eb-9d7a-d6bf3fc0cf55.png) Will result in this mask after shrinking ![lol](https://user-images.githubusercontent.com/26834698/108241733-bb78b480-717e-11eb-9bfd-da67fc275607.png) Why?
We can take a look at this block of code https://github.com/PaddlePaddle/PaddleOCR/blob/d91c3e7c6b851f1c3d3af95c8f9c3e7276200ee7/ppocr/data/imaug/make_shrink_map.py#L47-L60 Since vatti clipping algorithm invoked by pyclipper can return multiple polygon (5 in this case, with the default shrink ratio 0.4) like this: ``` [[[240, 112], [239, 115], [181, 115], [177, 114], [179, 114], [183, 111], [243, 111]], [[367, 115], [360, 115], [356, 116], [361, 114], [367, 114]], [[334, 114], [334, 115], [270, 115], [268, 114], [280, 114], [283, 112], [333, 112]], [[142, 113], [142, 114], [12, 114], [12, 111], [141, 111]], [[147, 116], [147, 115], [162, 115], [169, 114], [162, 116]]] ``` ![lol](https://user-images.githubusercontent.com/26834698/108242184-3510a280-717f-11eb-8cc9-90e14e1f351b.png) But the current implementation only uses the first polygon and assumes there is only one polygon, leading to the wrong shrink map. https://github.com/PaddlePaddle/PaddleOCR/blob/d91c3e7c6b851f1c3d3af95c8f9c3e7276200ee7/ppocr/data/imaug/make_shrink_map.py#L59 This will greatly affect the accuracy of the trained model. Ideally, we would want a shrink map of a text region to not be spliced up like above but rather a shrink map like this (not using the original polygon of course, if it can): ![lol](https://user-images.githubusercontent.com/26834698/108242699-cbdd5f00-717f-11eb-8f05-4d81f53ae649.png) I already sent a PR to fix this issue (#2014).
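To make the retry idea in PR #2014 concrete, here is a small self-contained sketch (toy polygon, not the one from the issue): the offset is retried with a larger shrink ratio whenever pyclipper splits the region into several pieces, and every returned piece is filled instead of only the first one:

```python
import cv2
import numpy as np
import pyclipper
from shapely.geometry import Polygon

def shrink_region(points, shrink_ratio=0.4):
    """Return the shrunken piece(s) of one text polygon."""
    poly = Polygon(points)
    offset = pyclipper.PyclipperOffset()
    offset.AddPath([tuple(p) for p in points], pyclipper.JT_ROUND,
                   pyclipper.ET_CLOSEDPOLYGON)
    pieces = []
    for ratio in np.arange(shrink_ratio, 1, shrink_ratio):
        distance = poly.area * (1 - ratio ** 2) / poly.length
        pieces = offset.Execute(-distance)
        if len(pieces) == 1:          # stop as soon as the region stays whole
            break
    return pieces

mask = np.zeros((20, 420), dtype=np.uint8)
region = [(10, 5), (410, 5), (410, 15), (10, 15)]   # a long, thin toy box
for piece in shrink_region(region):
    cv2.fillPoly(mask, [np.array(piece, dtype=np.int32).reshape(-1, 2)], 1)
```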
2021-02-17T17:41:05
PaddlePaddle/PaddleOCR
2,654
PaddlePaddle__PaddleOCR-2654
[ "2636" ]
8e4b213877ad2b2649f4b945723cae2a35c8a2a2
diff --git a/tools/program.py b/tools/program.py --- a/tools/program.py +++ b/tools/program.py @@ -18,6 +18,7 @@ import os import sys +import platform import yaml import time import shutil @@ -333,8 +334,10 @@ def eval(model, valid_dataloader, post_process_class, eval_class, total_frame = 0.0 total_time = 0.0 pbar = tqdm(total=len(valid_dataloader), desc='eval model:') + max_iter = len(valid_dataloader) - 1 if platform.system( + ) == "Windows" else len(valid_dataloader) for idx, batch in enumerate(valid_dataloader): - if idx >= len(valid_dataloader): + if idx >= max_iter: break images = batch[0] start = time.time()
Win10 training is interrupted. Win10, CUDA 11.0, training icdar2015 detection; even after reducing batch_size and num_workers, training is still interrupted when it reaches the **eval model** step???? ![image](https://user-images.githubusercontent.com/68001817/116015443-d3335400-a66b-11eb-8192-18745b5305e4.png) ![image](https://user-images.githubusercontent.com/68001817/116017716-f3b2dc80-a672-11eb-83c7-3c86f19f27ae.png)
Try changing this line to `if idx >= len(valid_dataloader) -1: ` https://github.com/PaddlePaddle/PaddleOCR/blob/db37ba33103f25607bc2b0004857378e7e786ca0/tools/program.py#L339
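For clarity, a sketch of what the merged change in `tools/program.py` does: on Windows the evaluation loop simply stops one batch early, which sidesteps the failure at the end of the multi-process DataLoader (the condition comes from the patch above; the surrounding loop body is a placeholder):

```python
import platform

def eval_loop(valid_dataloader):
    # Skip the final batch on Windows only; other platforms iterate fully.
    max_iter = (len(valid_dataloader) - 1
                if platform.system() == "Windows"
                else len(valid_dataloader))
    for idx, batch in enumerate(valid_dataloader):
        if idx >= max_iter:
            break
        _ = batch  # forward pass and metric accumulation go here
```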
2021-04-26T16:26:07
PaddlePaddle/PaddleOCR
3,268
PaddlePaddle__PaddleOCR-3268
[ "1706" ]
02b75a504dfce94110bb36e2b762c90483fe785f
diff --git a/tools/infer/predict_rec.py b/tools/infer/predict_rec.py --- a/tools/infer/predict_rec.py +++ b/tools/infer/predict_rec.py @@ -64,6 +64,24 @@ def __init__(self, args): self.postprocess_op = build_post_process(postprocess_params) self.predictor, self.input_tensor, self.output_tensors, self.config = \ utility.create_predictor(args, 'rec', logger) + self.benchmark = args.benchmark + if args.benchmark: + import auto_log + pid = os.getpid() + self.autolog = auto_log.AutoLogger( + model_name="rec", + model_precision=args.precision, + batch_size=args.rec_batch_num, + data_shape="dynamic", + save_path=args.save_log_path, + inference_config=self.config, + pids=pid, + process_name=None, + gpu_ids=0 if args.use_gpu else None, + time_keys=[ + 'preprocess_time', 'inference_time', 'postprocess_time' + ], + warmup=10) def resize_norm_img(self, img, max_wh_ratio): imgC, imgH, imgW = self.rec_image_shape @@ -168,6 +186,8 @@ def __call__(self, img_list): rec_res = [['', 0.0]] * img_num batch_num = self.rec_batch_num st = time.time() + if self.benchmark: + self.autolog.times.start() for beg_img_no in range(0, img_num, batch_num): end_img_no = min(img_num, beg_img_no + batch_num) norm_img_batch = [] @@ -196,6 +216,8 @@ def __call__(self, img_list): norm_img_batch.append(norm_img[0]) norm_img_batch = np.concatenate(norm_img_batch) norm_img_batch = norm_img_batch.copy() + if self.benchmark: + self.autolog.times.stamp() if self.rec_algorithm == "SRN": encoder_word_pos_list = np.concatenate(encoder_word_pos_list) @@ -222,6 +244,8 @@ def __call__(self, img_list): for output_tensor in self.output_tensors: output = output_tensor.copy_to_cpu() outputs.append(output) + if self.benchmark: + self.autolog.times.stamp() preds = {"predict": outputs[2]} else: self.input_tensor.copy_from_cpu(norm_img_batch) @@ -231,11 +255,14 @@ def __call__(self, img_list): for output_tensor in self.output_tensors: output = output_tensor.copy_to_cpu() outputs.append(output) + if self.benchmark: + self.autolog.times.stamp() preds = outputs[0] rec_result = self.postprocess_op(preds) for rno in range(len(rec_result)): rec_res[indices[beg_img_no + rno]] = rec_result[rno] - + if self.benchmark: + self.autolog.times.end(stamp=True) return rec_res, time.time() - st @@ -251,9 +278,6 @@ def main(args): for i in range(10): res = text_recognizer([img]) - cpu_mem, gpu_mem, gpu_util = 0, 0, 0 - count = 0 - for image_file in image_file_list: img, flag = check_and_read_gif(image_file) if not flag: @@ -273,6 +297,8 @@ def main(args): for ino in range(len(img_list)): logger.info("Predicts of {}:{}".format(valid_image_file_list[ino], rec_res[ino])) + if args.benchmark: + text_recognizer.autolog.report() if __name__ == "__main__":
diff --git a/test/ocr_rec_params.txt b/test/ocr_rec_params.txt new file mode 100644 --- /dev/null +++ b/test/ocr_rec_params.txt @@ -0,0 +1,35 @@ +model_name:ocr_rec +python:python +gpu_list:0|0,1 +Global.auto_cast:null +Global.epoch_num:10 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card: +Global.use_gpu: +Global.pretrained_model:null + +trainer:norm|pact +norm_train:tools/train.py -c configs/rec/rec_mv3_none_bilstm_ctc.yml +quant_train:deploy/slim/quantization/quant.py -c configs/rec/rec_mv3_none_bilstm_ctc.yml +fpgm_train:null +distill_train:null + +eval:tools/eval.py -c configs/rec/rec_mv3_none_bilstm_ctc.yml -o + +Global.save_inference_dir:./output/ +Global.pretrained_model: +norm_export:tools/export_model.py -c configs/rec/rec_mv3_none_bilstm_ctc.yml -o +quant_export:deploy/slim/quantization/export_model.py -c configs/rec/rec_mv3_none_bilstm_ctc.yml -o +fpgm_export:null +distill_export:null + +inference:tools/infer/predict_rec.py +--use_gpu:True|False +--enable_mkldnn:True|False +--cpu_threads:1|6 +--rec_batch_num:1 +--use_tensorrt:True|False +--precision:fp32|fp16|int8 +--rec_model_dir:./inference/ch_ppocr_mobile_v2.0_rec_infer/ +--image_dir:./inference/rec_inference +--save_log_path:./test/output/ \ No newline at end of file diff --git a/test/prepare.sh b/test/prepare.sh --- a/test/prepare.sh +++ b/test/prepare.sh @@ -29,19 +29,21 @@ train_model_list=$(func_parser_value "${lines[0]}") trainer_list=$(func_parser_value "${lines[10]}") - # MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer'] MODE=$2 -# prepare pretrained weights and dataset -wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams -wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar -cd pretrain_models && tar xf det_mv3_db_v2.0_train.tar && cd ../ - +# prepare pretrained weights and dataset +if [ ${train_model_list[*]} = "ocr_det" ]; then + wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams + wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar + cd pretrain_models && tar xf det_mv3_db_v2.0_train.tar && cd ../ + fi if [ ${MODE} = "lite_train_infer" ];then # pretrain lite train data rm -rf ./train_data/icdar2015 wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar - cd ./train_data/ && tar xf icdar2015_lite.tar + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar # todo change to bcebos + + cd ./train_data/ && tar xf icdar2015_lite.tar && tar xf ic15_data.tar ln -s ./icdar2015_lite ./icdar2015 cd ../ epoch=10 @@ -49,13 +51,15 @@ if [ ${MODE} = "lite_train_infer" ];then elif [ ${MODE} = "whole_train_infer" ];then rm -rf ./train_data/icdar2015 wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar - cd ./train_data/ && tar xf icdar2015.tar && cd ../ + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar + cd ./train_data/ && tar xf icdar2015.tar && tar xf ic15_data.tar && cd ../ epoch=500 eval_batch_step=200 elif [ ${MODE} = "whole_infer" ];then rm -rf ./train_data/icdar2015 wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_infer.tar - cd ./train_data/ && tar xf icdar2015_infer.tar + wget -nc -P ./train_data/ 
https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar + cd ./train_data/ && tar xf icdar2015_infer.tar && tar xf ic15_data.tar ln -s ./icdar2015_infer ./icdar2015 cd ../ epoch=10 @@ -88,9 +92,11 @@ for train_model in ${train_model_list[*]}; do elif [ ${train_model} = "ocr_rec" ];then model_name="ocr_rec" yml_file="configs/rec/rec_mv3_none_bilstm_ctc.yml" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_rec_data_200.tar - cd ./inference && tar xf ch_rec_data_200.tar && cd ../ - img_dir="./inference/ch_rec_data_200/" + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar + cd ./inference && tar xf rec_inference.tar && cd ../ + img_dir="./inference/rec_inference/" + data_dir=./inference/rec_inference + data_label_file=[./inference/rec_inference/rec_gt_test.txt] fi # eval
some updates repeated 2020.6.5 Support exporting attention model to inference_model 2020.6.5 Support separate prediction and recognition, output result score 2020.6.5 Support exporting attention model to inference_model 2020.6.5 Support separate prediction and recognition, output result score
Where did you see these? https://github.com/PaddlePaddle/PaddleOCR/blob/dygraph/doc/doc_en/update_en.md
2021-07-06T05:36:38
PaddlePaddle/PaddleOCR
3,809
PaddlePaddle__PaddleOCR-3809
[ "3790" ]
68e52250f3587c92b0a99851487a881c25c66837
diff --git a/tools/infer/predict_rec.py b/tools/infer/predict_rec.py --- a/tools/infer/predict_rec.py +++ b/tools/infer/predict_rec.py @@ -88,8 +88,8 @@ def __init__(self, args): def resize_norm_img(self, img, max_wh_ratio): imgC, imgH, imgW = self.rec_image_shape assert imgC == img.shape[2] - if self.character_type == "ch": - imgW = int((32 * max_wh_ratio)) + max_wh_ratio = max(max_wh_ratio, imgW / imgH) + imgW = int((32 * max_wh_ratio)) h, w = img.shape[:2] ratio = w / float(h) if math.ceil(imgH * ratio) > imgW:
Problem with the recognition model when handling Chinese. I installed the latest version to try it out and found a big gap between the trained model and the inference model. After digging for a long time it turned out to be [this problem](https://github.com/PaddlePaddle/PaddleOCR/issues/214#issuecomment-645346661) again (actually a pitfall I had already hit and forgotten about, assuming it had been fixed). So I would like to ask whether the code in predict_rec and infer_rec can be kept in sync (meaning the handling when rec_char_type= "ch"). Quite a few people on the forum have run into this, and it has been there **since last year**. After all this time, I really have to complain.
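A toy, self-contained sketch of the resize behaviour that the fix above unifies between training and inference: the width budget always follows the batch's widest aspect ratio instead of doing so only when `rec_char_type` is "ch" (normalisation is simplified here; the real code also shifts and scales the pixel values, and an HxWx3 crop is assumed):

```python
import math

import cv2
import numpy as np

def resize_norm_img(img, max_wh_ratio, rec_image_shape=(3, 32, 320)):
    imgC, imgH, imgW = rec_image_shape
    max_wh_ratio = max(max_wh_ratio, imgW / imgH)  # never below the native ratio
    imgW = int(imgH * max_wh_ratio)                # applied for every language now
    h, w = img.shape[:2]
    ratio = w / float(h)
    resized_w = imgW if math.ceil(imgH * ratio) > imgW else int(math.ceil(imgH * ratio))
    resized = cv2.resize(img, (resized_w, imgH)).astype("float32") / 255.0
    padded = np.zeros((imgH, imgW, imgC), dtype="float32")
    padded[:, :resized_w, :] = resized
    return padded.transpose((2, 0, 1))             # HWC -> CHW
```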
2021-08-25T07:35:59
PaddlePaddle/PaddleOCR
5,072
PaddlePaddle__PaddleOCR-5072
[ "5065" ]
04c44974b13163450dfb6bd2c327863f8a194b3c
diff --git a/tools/infer/utility.py b/tools/infer/utility.py --- a/tools/infer/utility.py +++ b/tools/infer/utility.py @@ -313,6 +313,11 @@ def create_predictor(args, mode, logger): def get_infer_gpuid(): + if os.name == 'nt': + try: + return int(os.environ['CUDA_VISIBLE_DEVICES'].split(',')[0]) + except KeyError: + return 0 if not paddle.fluid.core.is_compiled_with_rocm(): cmd = "env | grep CUDA_VISIBLE_DEVICES" else:
On Windows, OCR inference reports an error -- 'env' is not recognized as an internal or external command, operable program or batch file. Please provide the following information to quickly locate the problem - System Environment: Windows10 - Version: Paddle:2.3 PaddleOCR:2.3.0.1 - Related components: ppocr - Command Code: - ![QQ图片20211227103121](https://user-images.githubusercontent.com/3079297/147428430-1e61ca3e-c4ce-4c1e-8095-7ec327592752.jpg) - Complete Error Message: ![QQ图片20211226232206](https://user-images.githubusercontent.com/3079297/147428471-03f24025-8077-4491-883e-77b7ee20326a.jpg) It should be caused by the following code: PaddleOCR/tools/infer/utility.py Line#315 ``` def get_infer_gpuid(): if not paddle.fluid.core.is_compiled_with_rocm(): cmd = "env | grep CUDA_VISIBLE_DEVICES" else: cmd = "env | grep HIP_VISIBLE_DEVICES" env_cuda = os.popen(cmd).readlines() ``` Directly replacing the code with ``` def get_infer_gpuid(): return 0 ``` works around it temporarily.
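A self-contained sketch of the direction the merged fix takes: on Windows (`os.name == 'nt'`) there is no `env | grep ...`, so the GPU id is read from `os.environ` directly; the POSIX fallback below is simplified compared with the real implementation, which still shells out:

```python
import os

def get_infer_gpuid():
    if os.name == "nt":
        try:
            return int(os.environ["CUDA_VISIBLE_DEVICES"].split(",")[0])
        except KeyError:
            return 0
    # Simplified POSIX branch; the real code pipes `env` through `grep`.
    return int(os.environ.get("CUDA_VISIBLE_DEVICES", "0").split(",")[0] or 0)

print(get_infer_gpuid())
```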
2021-12-27T04:27:07
PaddlePaddle/PaddleOCR
6,466
PaddlePaddle__PaddleOCR-6466
[ "6453" ]
8bdb9d4e1028529d44d612612ef5ac54d57e7ea4
diff --git a/deploy/pdserving/ocr_reader.py b/deploy/pdserving/ocr_reader.py --- a/deploy/pdserving/ocr_reader.py +++ b/deploy/pdserving/ocr_reader.py @@ -339,7 +339,7 @@ def get_beg_end_flag_idx(self, beg_or_end): class OCRReader(object): def __init__(self, algorithm="CRNN", - image_shape=[3, 32, 320], + image_shape=[3, 48, 320], char_type="ch", batch_num=1, char_dict_path="./ppocr_keys_v1.txt"): @@ -356,7 +356,7 @@ def __init__(self, def resize_norm_img(self, img, max_wh_ratio): imgC, imgH, imgW = self.rec_image_shape if self.character_type == "ch": - imgW = int(32 * max_wh_ratio) + imgW = int(imgH * max_wh_ratio) h = img.shape[0] w = img.shape[1] ratio = w / float(h) @@ -377,7 +377,7 @@ def resize_norm_img(self, img, max_wh_ratio): def preprocess(self, img_list): img_num = len(img_list) norm_img_batch = [] - max_wh_ratio = 0 + max_wh_ratio = 320/48. for ino in range(img_num): h, w = img_list[ino].shape[0:2] wh_ratio = w * 1.0 / h diff --git a/deploy/pdserving/web_service.py b/deploy/pdserving/web_service.py --- a/deploy/pdserving/web_service.py +++ b/deploy/pdserving/web_service.py @@ -63,7 +63,6 @@ def postprocess(self, input_dicts, fetch_dict, data_id, log_id): dt_boxes_list = self.post_func(det_out, [ratio_list]) dt_boxes = self.filter_func(dt_boxes_list[0], [self.ori_h, self.ori_w]) out_dict = {"dt_boxes": dt_boxes, "image": self.raw_im} - return out_dict, None, "" @@ -86,7 +85,7 @@ def preprocess(self, input_dicts, data_id, log_id): dt_boxes = copy.deepcopy(self.dt_list) feed_list = [] img_list = [] - max_wh_ratio = 0 + max_wh_ratio = 320/48. ## Many mini-batchs, the type of feed_data is list. max_batch_size = 6 # len(dt_boxes) @@ -150,7 +149,8 @@ def postprocess(self, input_dicts, fetch_data, data_id, log_id): for i in range(dt_num): text = rec_list[i] dt_box = self.dt_list[i] - result_list.append([text, dt_box.tolist()]) + if text[1] >= 0.5: + result_list.append([text, dt_box.tolist()]) res = {"result": str(result_list)} return res, None, ""
Accuracy problem with v3 text recognition in pdserver pipeline mode. In pdserver pipeline mode, the text recognition accuracy of the v3 model drops a lot compared with v2, even though the startup script and prediction script are the same as for v2. Looking at the results, many special characters such as '-' are not detected at all. Is there some configuration that needs to be changed?
Feedback received~ Could you provide a test image? We will look into the problem. > With this image, the minus signs in front of -15.45 and -457.38 are not recognized ![80C46FFDE1C61A5F07B8254B5DF4437D_IMAGE](https://user-images.githubusercontent.com/26426479/171311483-d67c4816-f72e-4bf1-a26c-ef305e01dc5e.jpg)
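For context, a small sketch of the v3-specific preprocessing constants changed by the patch above; the numeric values come straight from the diff, everything else is illustrative:

```python
# PP-OCRv3 recognition crops are 48 pixels high (v2 used 32), so the reader
# shape and the initial width/height ratio have to change together.
rec_image_shape = [3, 48, 320]
imgC, imgH, imgW = rec_image_shape
max_wh_ratio = imgW / imgH            # start from 320/48 instead of 0

def target_width(crop_w, crop_h):
    # Width budget for one crop: never narrower than the model's native ratio.
    ratio = max(max_wh_ratio, crop_w / float(crop_h))
    return int(imgH * ratio)          # the v2 reader used int(32 * ratio)

print(target_width(200, 40))          # a 5:1 crop still gets a 320-wide input
```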
2022-06-01T09:57:38
PaddlePaddle/PaddleOCR
7,535
PaddlePaddle__PaddleOCR-7535
[ "7534" ]
2e352dcc06ba86159099ec6a2928c7ce556a7245
diff --git a/paddleocr.py b/paddleocr.py --- a/paddleocr.py +++ b/paddleocr.py @@ -480,10 +480,11 @@ def __init__(self, **kwargs): params.rec_image_shape = "3, 48, 320" else: params.rec_image_shape = "3, 32, 320" - # download model - maybe_download(params.det_model_dir, det_url) - maybe_download(params.rec_model_dir, rec_url) - maybe_download(params.cls_model_dir, cls_url) + # download model if using paddle infer + if not params.use_onnx: + maybe_download(params.det_model_dir, det_url) + maybe_download(params.rec_model_dir, rec_url) + maybe_download(params.cls_model_dir, cls_url) if params.det_algorithm not in SUPPORT_DET_MODEL: logger.error('det_algorithm must in {}'.format(SUPPORT_DET_MODEL))
PaddleOCR(use_onnx=True) will try to download wrong model file Please provide the following information to quickly locate the problem - System Environment: MacOS 12.4 - Version: Paddle:2.3.1 PaddleOCR:2.6 - Related components: PaddleOCR python class - Command Code: ```python PaddleOCR(use_onnx=True, det_model_dir=os.path.expanduser("~/.paddleocr/onnx/ch_PP-OCRv3_det_infer.onnx"), rec_model_dir=os.path.expanduser("~/.paddleocr/onnx/ch_PP-OCRv3_rec_infer.onnx"), cls_model_dir=os.path.expanduser("~/.paddleocr/onnx/ch_ppocr_mobile_v2.0_cls_infer.onnx")) ``` - Complete Error Message: ``` download https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar to ~/.paddleocr/onnx/ch_PP-OCRv3_det_infer.onnx/ch_PP-OCRv3_det_infer.tar Process SpawnProcess-5: Traceback (most recent call last): ... (some framework file) ... File "..././main.py", line 32, in <module> PaddleOCR(use_onnx=True, det_model_dir=os.path.expanduser("~/.paddleocr/onnx/ch_PP-OCRv3_det_infer.onnx"), File "[VENV PATH]/lib/python3.9/site-packages/paddleocr/paddleocr.py", line 457, in __init__ maybe_download(params.det_model_dir, det_url) File "[VENV PATH]/lib/python3.9/site-packages/paddleocr/ppocr/utils/network.py", line 52, in maybe_download os.makedirs(model_storage_directory, exist_ok=True) File "/usr/local/Cellar/python@3.9/3.9.10/Frameworks/Python.framework/Versions/3.9/lib/python3.9/os.py", line 225, in makedirs mkdir(name, mode) FileExistsError: [Errno 17] File exists: '~/.paddleocr/onnx/ch_PP-OCRv3_det_infer.onnx' ``` I've already converted the PaddleOCR models into ONNX format, but when I tried to use them through the PaddleOCR python class, the maybe_download function still tries to download the Paddle model files. I've created a PR and will submit it soon.
2022-09-08T10:38:19
PaddlePaddle/PaddleOCR
8,926
PaddlePaddle__PaddleOCR-8926
[ "8855" ]
9059f747e62bb0ffe2a3dc06f011627eed56c882
diff --git a/ppocr/utils/gen_label.py b/ppocr/utils/gen_label.py --- a/ppocr/utils/gen_label.py +++ b/ppocr/utils/gen_label.py @@ -29,7 +29,7 @@ def gen_rec_label(input_path, out_label): def gen_det_label(root_path, input_dir, out_label): with open(out_label, 'w') as out_file: for label_file in os.listdir(input_dir): - img_path = root_path + label_file[3:-4] + ".jpg" + img_path = os.path.join(root_path, label_file[3:-4] + ".jpg") label = [] with open( os.path.join(input_dir, label_file), 'r', diff --git a/tools/infer/utility.py b/tools/infer/utility.py --- a/tools/infer/utility.py +++ b/tools/infer/utility.py @@ -42,6 +42,7 @@ def init_args(): parser.add_argument("--min_subgraph_size", type=int, default=15) parser.add_argument("--precision", type=str, default="fp32") parser.add_argument("--gpu_mem", type=int, default=500) + parser.add_argument("--gpu_id", type=int, default=0) # params for text detector parser.add_argument("--image_dir", type=str) @@ -219,7 +220,7 @@ def create_predictor(args, mode, logger): logger.warning( "GPU is not found in current device by nvidia-smi. Please check your device or ignore it if run on jetson." ) - config.enable_use_gpu(args.gpu_mem, 0) + config.enable_use_gpu(args.gpu_mem, args.gpu_id) if args.use_tensorrt: config.enable_tensorrt_engine( workspace_size=1 << 30,
gen_lable.py in paddleocr converts the icdar2015 data incorrectly ![image](https://user-images.githubusercontent.com/119492593/212064964-c7db056f-3209-41f6-a9e0-daa5bc790c61.png) The data I converted with gen_lable.py looks like this, and it seems wrong to me. Shouldn't it be: ch4_training_images/img_1.jpg [{"transcription": "Genaxis Theatre", "points": [[377, 117], [463, 117], [465, 130], [378, 130]
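A runnable sketch of what the merged change to `ppocr/utils/gen_label.py` does for the detection labels: the image path is built with `os.path.join` instead of bare string concatenation, with the `gt_` prefix and `.txt` suffix of the per-image label file stripped to recover the image name:

```python
import os

def det_image_path(root_path, label_file):
    # "gt_img_1.txt" -> "img_1" -> "<root>/img_1.jpg"
    return os.path.join(root_path, label_file[3:-4] + ".jpg")

print(det_image_path("ch4_training_images", "gt_img_1.txt"))
# -> ch4_training_images/img_1.jpg on POSIX systems
```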
2023-01-30T08:19:22
PaddlePaddle/PaddleOCR
9,099
PaddlePaddle__PaddleOCR-9099
[ "8855" ]
98c44d182b69fc41b1ad8049a4e4d3bb5df892a9
diff --git a/ppocr/postprocess/east_postprocess.py b/ppocr/postprocess/east_postprocess.py --- a/ppocr/postprocess/east_postprocess.py +++ b/ppocr/postprocess/east_postprocess.py @@ -81,6 +81,7 @@ def detect(self, try: check_install('lanms', 'lanms-nova') import lanms + boxes = lanms.merge_quadrangle_n9(boxes, nms_thresh) except: print( 'You should install lanms by pip3 install lanms-nova to speed up nms_locality'
gen_lable.py in paddleocr converts the icdar2015 data incorrectly ![image](https://user-images.githubusercontent.com/119492593/212064964-c7db056f-3209-41f6-a9e0-daa5bc790c61.png) The data I converted with gen_lable.py looks like this, and it seems wrong to me. Shouldn't it be: ch4_training_images/img_1.jpg [{"transcription": "Genaxis Theatre", "points": [[377, 117], [463, 117], [465, 130], [378, 130]
2023-02-16T03:11:33
PaddlePaddle/PaddleOCR
9,898
PaddlePaddle__PaddleOCR-9898
[ "8855" ]
7e0c8aea840bb44720c73b4b02a7f6b04b402ec4
diff --git a/ppocr/modeling/heads/__init__.py b/ppocr/modeling/heads/__init__.py --- a/ppocr/modeling/heads/__init__.py +++ b/ppocr/modeling/heads/__init__.py @@ -17,7 +17,7 @@ def build_head(config): # det head - from .det_db_head import DBHead, CBNHeadLocal + from .det_db_head import DBHead, PFHeadLocal from .det_east_head import EASTHead from .det_sast_head import SASTHead from .det_pse_head import PSEHead @@ -56,7 +56,7 @@ def build_head(config): 'TableAttentionHead', 'SARHead', 'AsterHead', 'SDMGRHead', 'PRENHead', 'MultiHead', 'ABINetHead', 'TableMasterHead', 'SPINAttentionHead', 'VLHead', 'SLAHead', 'RobustScannerHead', 'CT_Head', 'RFLHead', - 'DRRGHead', 'CANHead', 'SATRNHead', 'CBNHeadLocal' + 'DRRGHead', 'CANHead', 'SATRNHead', 'PFHeadLocal' ] if config['name'] == 'DRRGHead': diff --git a/ppocr/modeling/heads/det_db_head.py b/ppocr/modeling/heads/det_db_head.py --- a/ppocr/modeling/heads/det_db_head.py +++ b/ppocr/modeling/heads/det_db_head.py @@ -129,9 +129,9 @@ def forward(self, x, init_map, distance_map): return out -class CBNHeadLocal(DBHead): +class PFHeadLocal(DBHead): def __init__(self, in_channels, k=50, mode='small', **kwargs): - super(CBNHeadLocal, self).__init__(in_channels, k, **kwargs) + super(PFHeadLocal, self).__init__(in_channels, k, **kwargs) self.mode = mode self.up_conv = nn.Upsample(scale_factor=2, mode="nearest", align_mode=1)
gen_lable.py in paddleocr converts the icdar2015 data incorrectly ![image](https://user-images.githubusercontent.com/119492593/212064964-c7db056f-3209-41f6-a9e0-daa5bc790c61.png) The data I converted with gen_lable.py looks like this, and it seems wrong to me. Shouldn't it be: ch4_training_images/img_1.jpg [{"transcription": "Genaxis Theatre", "points": [[377, 117], [463, 117], [465, 130], [378, 130]
2023-05-09T02:45:17
PaddlePaddle/PaddleOCR
9,921
PaddlePaddle__PaddleOCR-9921
[ "8855" ]
dd8cc21b2fe010e8c4c8f5612007e2f11466a315
diff --git a/ppocr/losses/basic_loss.py b/ppocr/losses/basic_loss.py --- a/ppocr/losses/basic_loss.py +++ b/ppocr/losses/basic_loss.py @@ -165,3 +165,79 @@ def forward(self, predicts, batch): elif self.reduction == 'sum': loss = paddle.sum(loss) return {'loss': loss} + + +class KLDivLoss(nn.Layer): + """ + KLDivLoss + """ + + def __init__(self): + super().__init__() + + def _kldiv(self, x, target, mask=None): + eps = 1.0e-10 + loss = target * (paddle.log(target + eps) - x) + if mask is not None: + loss = loss.flatten(0, 1).sum(axis=1) + loss = loss.masked_select(mask).mean() + else: + # batch mean loss + loss = paddle.sum(loss) / loss.shape[0] + return loss + + def forward(self, logits_s, logits_t, mask=None): + log_out_s = F.log_softmax(logits_s, axis=-1) + out_t = F.softmax(logits_t, axis=-1) + loss = self._kldiv(log_out_s, out_t, mask) + return loss + + +class DKDLoss(nn.Layer): + """ + KLDivLoss + """ + + def __init__(self, temperature=1.0, alpha=1.0, beta=1.0): + super().__init__() + self.temperature = temperature + self.alpha = alpha + self.beta = beta + + def _cat_mask(self, t, mask1, mask2): + t1 = (t * mask1).sum(axis=1, keepdim=True) + t2 = (t * mask2).sum(axis=1, keepdim=True) + rt = paddle.concat([t1, t2], axis=1) + return rt + + def _kl_div(self, x, label, mask=None): + y = (label * (paddle.log(label + 1e-10) - x)).sum(axis=1) + if mask is not None: + y = y.masked_select(mask).mean() + else: + y = y.mean() + return y + + def forward(self, logits_student, logits_teacher, target, mask=None): + gt_mask = F.one_hot( + target.reshape([-1]), num_classes=logits_student.shape[-1]) + other_mask = 1 - gt_mask + logits_student = logits_student.flatten(0, 1) + logits_teacher = logits_teacher.flatten(0, 1) + pred_student = F.softmax(logits_student / self.temperature, axis=1) + pred_teacher = F.softmax(logits_teacher / self.temperature, axis=1) + pred_student = self._cat_mask(pred_student, gt_mask, other_mask) + pred_teacher = self._cat_mask(pred_teacher, gt_mask, other_mask) + log_pred_student = paddle.log(pred_student) + tckd_loss = self._kl_div(log_pred_student, + pred_teacher) * (self.temperature**2) + pred_teacher_part2 = F.softmax( + logits_teacher / self.temperature - 1000.0 * gt_mask, axis=1) + log_pred_student_part2 = F.log_softmax( + logits_student / self.temperature - 1000.0 * gt_mask, axis=1) + nckd_loss = self._kl_div(log_pred_student_part2, + pred_teacher_part2) * (self.temperature**2) + + loss = self.alpha * tckd_loss + self.beta * nckd_loss + + return loss diff --git a/ppocr/losses/combined_loss.py b/ppocr/losses/combined_loss.py --- a/ppocr/losses/combined_loss.py +++ b/ppocr/losses/combined_loss.py @@ -20,9 +20,9 @@ from .ace_loss import ACELoss from .rec_sar_loss import SARLoss -from .distillation_loss import DistillationCTCLoss -from .distillation_loss import DistillationSARLoss -from .distillation_loss import DistillationDMLLoss +from .distillation_loss import DistillationCTCLoss, DistillCTCLogits +from .distillation_loss import DistillationSARLoss, DistillationNRTRLoss +from .distillation_loss import DistillationDMLLoss, DistillationKLDivLoss, DistillationDKDLoss from .distillation_loss import DistillationDistanceLoss, DistillationDBLoss, DistillationDilaDBLoss from .distillation_loss import DistillationVQASerTokenLayoutLMLoss, DistillationSERDMLLoss from .distillation_loss import DistillationLossFromOutput diff --git a/ppocr/losses/distillation_loss.py b/ppocr/losses/distillation_loss.py --- a/ppocr/losses/distillation_loss.py +++ b/ppocr/losses/distillation_loss.py 
@@ -14,12 +14,14 @@ import paddle import paddle.nn as nn +import paddle.nn.functional as F import numpy as np import cv2 from .rec_ctc_loss import CTCLoss from .rec_sar_loss import SARLoss -from .basic_loss import DMLLoss +from .rec_ce_loss import CELoss +from .basic_loss import DMLLoss, KLDivLoss, DKDLoss from .basic_loss import DistanceLoss from .basic_loss import LossFromOutput from .det_db_loss import DBLoss @@ -102,11 +104,220 @@ def forward(self, predicts, batch): if self.key is not None: out1 = out1[self.key] out2 = out2[self.key] + if self.maps_name is None: + if self.multi_head: + # for nrtr dml loss + max_len = batch[3].max() + tgt = batch[2][:, 1:2 + max_len] + tgt = tgt.reshape([-1]) + non_pad_mask = paddle.not_equal( + tgt, paddle.zeros( + tgt.shape, dtype=tgt.dtype)) + loss = super().forward(out1[self.dis_head], + out2[self.dis_head], non_pad_mask) + else: + loss = super().forward(out1, out2) + if isinstance(loss, dict): + for key in loss: + loss_dict["{}_{}_{}_{}".format(key, pair[0], pair[1], + idx)] = loss[key] + else: + loss_dict["{}_{}".format(self.name, idx)] = loss + else: + outs1 = self._slice_out(out1) + outs2 = self._slice_out(out2) + for _c, k in enumerate(outs1.keys()): + loss = super().forward(outs1[k], outs2[k]) + if isinstance(loss, dict): + for key in loss: + loss_dict["{}_{}_{}_{}_{}".format(key, pair[ + 0], pair[1], self.maps_name, idx)] = loss[key] + else: + loss_dict["{}_{}_{}".format(self.name, self.maps_name[ + _c], idx)] = loss + + loss_dict = _sum_loss(loss_dict) + + return loss_dict + + +class DistillationKLDivLoss(KLDivLoss): + """ + """ + def __init__(self, + model_name_pairs=[], + key=None, + multi_head=False, + dis_head='ctc', + maps_name=None, + name="kl_div"): + super().__init__() + assert isinstance(model_name_pairs, list) + self.key = key + self.multi_head = multi_head + self.dis_head = dis_head + self.model_name_pairs = self._check_model_name_pairs(model_name_pairs) + self.name = name + self.maps_name = self._check_maps_name(maps_name) + + def _check_model_name_pairs(self, model_name_pairs): + if not isinstance(model_name_pairs, list): + return [] + elif isinstance(model_name_pairs[0], list) and isinstance( + model_name_pairs[0][0], str): + return model_name_pairs + else: + return [model_name_pairs] + + def _check_maps_name(self, maps_name): + if maps_name is None: + return None + elif type(maps_name) == str: + return [maps_name] + elif type(maps_name) == list: + return [maps_name] + else: + return None + + def _slice_out(self, outs): + new_outs = {} + for k in self.maps_name: + if k == "thrink_maps": + new_outs[k] = outs[:, 0, :, :] + elif k == "threshold_maps": + new_outs[k] = outs[:, 1, :, :] + elif k == "binary_maps": + new_outs[k] = outs[:, 2, :, :] + else: + continue + return new_outs + + def forward(self, predicts, batch): + loss_dict = dict() + for idx, pair in enumerate(self.model_name_pairs): + out1 = predicts[pair[0]] + out2 = predicts[pair[1]] + if self.key is not None: + out1 = out1[self.key] + out2 = out2[self.key] if self.maps_name is None: if self.multi_head: + # for nrtr dml loss + max_len = batch[3].max() + tgt = batch[2][:, 1:2 + max_len] + tgt = tgt.reshape([-1]) + non_pad_mask = paddle.not_equal( + tgt, paddle.zeros( + tgt.shape, dtype=tgt.dtype)) loss = super().forward(out1[self.dis_head], - out2[self.dis_head]) + out2[self.dis_head], non_pad_mask) + else: + loss = super().forward(out1, out2) + if isinstance(loss, dict): + for key in loss: + loss_dict["{}_{}_{}_{}".format(key, pair[0], pair[1], + idx)] = loss[key] + 
else: + loss_dict["{}_{}".format(self.name, idx)] = loss + else: + outs1 = self._slice_out(out1) + outs2 = self._slice_out(out2) + for _c, k in enumerate(outs1.keys()): + loss = super().forward(outs1[k], outs2[k]) + if isinstance(loss, dict): + for key in loss: + loss_dict["{}_{}_{}_{}_{}".format(key, pair[ + 0], pair[1], self.maps_name, idx)] = loss[key] + else: + loss_dict["{}_{}_{}".format(self.name, self.maps_name[ + _c], idx)] = loss + + loss_dict = _sum_loss(loss_dict) + + return loss_dict + + +class DistillationDKDLoss(DKDLoss): + """ + """ + + def __init__(self, + model_name_pairs=[], + key=None, + multi_head=False, + dis_head='ctc', + maps_name=None, + name="dkd", + temperature=1.0, + alpha=1.0, + beta=1.0): + super().__init__(temperature, alpha, beta) + assert isinstance(model_name_pairs, list) + self.key = key + self.multi_head = multi_head + self.dis_head = dis_head + self.model_name_pairs = self._check_model_name_pairs(model_name_pairs) + self.name = name + self.maps_name = self._check_maps_name(maps_name) + + def _check_model_name_pairs(self, model_name_pairs): + if not isinstance(model_name_pairs, list): + return [] + elif isinstance(model_name_pairs[0], list) and isinstance( + model_name_pairs[0][0], str): + return model_name_pairs + else: + return [model_name_pairs] + + def _check_maps_name(self, maps_name): + if maps_name is None: + return None + elif type(maps_name) == str: + return [maps_name] + elif type(maps_name) == list: + return [maps_name] + else: + return None + + def _slice_out(self, outs): + new_outs = {} + for k in self.maps_name: + if k == "thrink_maps": + new_outs[k] = outs[:, 0, :, :] + elif k == "threshold_maps": + new_outs[k] = outs[:, 1, :, :] + elif k == "binary_maps": + new_outs[k] = outs[:, 2, :, :] + else: + continue + return new_outs + + def forward(self, predicts, batch): + loss_dict = dict() + + for idx, pair in enumerate(self.model_name_pairs): + out1 = predicts[pair[0]] + out2 = predicts[pair[1]] + if self.key is not None: + out1 = out1[self.key] + out2 = out2[self.key] + if self.maps_name is None: + if self.multi_head: + # for nrtr dml loss + max_len = batch[3].max() + tgt = batch[2][:, 1:2 + + max_len] # [batch_size, max_len + 1] + + tgt = tgt.reshape([-1]) # batch_size * (max_len + 1) + non_pad_mask = paddle.not_equal( + tgt, paddle.zeros( + tgt.shape, + dtype=tgt.dtype)) # batch_size * (max_len + 1) + + loss = super().forward( + out1[self.dis_head], out2[self.dis_head], tgt, + non_pad_mask) # [batch_size, max_len + 1, num_char] else: loss = super().forward(out1, out2) if isinstance(loss, dict): @@ -199,6 +410,40 @@ def forward(self, predicts, batch): return loss_dict +class DistillationNRTRLoss(CELoss): + def __init__(self, + model_name_list=[], + key=None, + multi_head=False, + smoothing=True, + name="loss_nrtr", + **kwargs): + super().__init__(smoothing=smoothing) + self.model_name_list = model_name_list + self.key = key + self.name = name + self.multi_head = multi_head + + def forward(self, predicts, batch): + loss_dict = dict() + for idx, model_name in enumerate(self.model_name_list): + out = predicts[model_name] + if self.key is not None: + out = out[self.key] + if self.multi_head: + assert 'gtc' in out, 'multi head has multi out' + loss = super().forward(out['gtc'], batch[:1] + batch[2:]) + else: + loss = super().forward(out, batch) + if isinstance(loss, dict): + for key in loss: + loss_dict["{}_{}_{}".format(self.name, model_name, + idx)] = loss[key] + else: + loss_dict["{}_{}".format(self.name, model_name)] = loss + return loss_dict 
+ + class DistillationDBLoss(DBLoss): def __init__(self, model_name_list=[], @@ -459,3 +704,212 @@ def forward(self, predicts, batch): loss_dict["{}_{}_{}_{}".format(self.name, pair[0], pair[1], idx)] = loss return loss_dict + + +class CTCDKDLoss(nn.Layer): + """ + KLDivLoss + """ + + def __init__(self, temperature=0.5, alpha=1.0, beta=1.0): + super().__init__() + self.temperature = temperature + self.alpha = alpha + self.beta = beta + self.eps = 1e-6 + self.t = temperature + self.act = nn.Softmax(axis=-1) + self.use_log = True + + def kl_loss(self, p1, p2): # predict, label + loss = paddle.multiply( + p2, paddle.log((p2 + self.eps) / (p1 + self.eps) + self.eps)) + bs = loss.shape[0] + loss = paddle.sum(loss) / bs + return loss + + def _cat_mask(self, t, mask1, mask2): + t1 = (t * mask1).sum(axis=1, keepdim=True) + t2 = (t * mask2).sum(axis=1, keepdim=True) + rt = paddle.concat([t1, t2], axis=1) + return rt + + def multi_label_mask(self, targets): + + targets = targets.astype("int32") + res = F.one_hot(targets, num_classes=11465) + mask = paddle.clip(paddle.sum(res, axis=1), 0, 1) + mask[:, 0] = 0 # ingore ctc blank label + return mask + + def forward(self, logits_student, logits_teacher, targets, mask=None): + + gt_mask = self.multi_label_mask(targets) + other_mask = paddle.ones_like(gt_mask) - gt_mask + + pred_student = F.softmax(logits_student / self.temperature, axis=-1) + pred_teacher = F.softmax(logits_teacher / self.temperature, axis=-1) + + # differents with dkd + pred_student = paddle.mean(pred_student, axis=1) + pred_teacher = paddle.mean(pred_teacher, axis=1) + + pred_student = self._cat_mask(pred_student, gt_mask, other_mask) + pred_teacher = self._cat_mask(pred_teacher, gt_mask, other_mask) + + # differents with dkd + tckd_loss = self.kl_loss(pred_student, pred_teacher) + + gt_mask_ex = paddle.expand_as(gt_mask.unsqueeze(axis=1), logits_teacher) + pred_teacher_part2 = F.softmax( + logits_teacher / self.temperature - 1000.0 * gt_mask_ex, axis=-1) + pred_student_part2 = F.softmax( + logits_student / self.temperature - 1000.0 * gt_mask_ex, axis=-1) + # differents with dkd + pred_teacher_part2 = paddle.mean(pred_teacher_part2, axis=1) + pred_student_part2 = paddle.mean(pred_student_part2, axis=1) + + # differents with dkd + nckd_loss = self.kl_loss(pred_student_part2, pred_teacher_part2) + loss = self.alpha * tckd_loss + self.beta * nckd_loss + return loss + + +class KLCTCLogits(nn.Layer): + def __init__(self, weight=1.0, reduction='mean', mode="mean"): + super().__init__() + self.weight = weight + self.reduction = reduction + self.eps = 1e-6 + self.t = 0.5 + self.act = nn.Softmax(axis=-1) + self.use_log = True + self.mode = mode + self.ctc_dkd_loss = CTCDKDLoss() + + def kl_loss(self, p1, p2): # predict, label + loss = paddle.multiply( + p2, paddle.log((p2 + self.eps) / (p1 + self.eps) + self.eps)) + bs = loss.shape[0] + loss = paddle.sum(loss) / bs + return loss + + def forward_meanmax(self, stu_out, tea_out): + + stu_out = paddle.mean(F.softmax(stu_out / self.t, axis=-1), axis=1) + tea_out = paddle.mean(F.softmax(tea_out / self.t, axis=-1), axis=1) + loss = self.kl_loss(stu_out, tea_out) + + return loss + + def forward_meanlog(self, stu_out, tea_out): + stu_out = paddle.mean(F.softmax(stu_out / self.t, axis=-1), axis=1) + tea_out = paddle.mean(F.softmax(tea_out / self.t, axis=-1), axis=1) + if self.use_log is True: + # for recognition distillation, log is needed for feature map + log_out1 = paddle.log(stu_out) + log_out2 = paddle.log(tea_out) + loss = ( + self._kldiv(log_out1, 
tea_out) + self._kldiv(log_out2, stu_out) + ) / 2.0 + + return loss + + def forward_sum(self, stu_out, tea_out): + stu_out = paddle.sum(F.softmax(stu_out / self.t, axis=-1), axis=1) + tea_out = paddle.sum(F.softmax(tea_out / self.t, axis=-1), axis=1) + stu_out = paddle.log(stu_out) + bs = stu_out.shape[0] + loss = tea_out * (paddle.log(tea_out + self.eps) - stu_out) + loss = paddle.sum(loss, axis=1) / loss.shape[0] + return loss + + def _kldiv(self, x, target): + eps = 1.0e-10 + loss = target * (paddle.log(target + eps) - x) + loss = paddle.sum(paddle.mean(loss, axis=1)) / loss.shape[0] + return loss + + def forward(self, stu_out, tea_out, targets=None): + if self.mode == "log": + return self.forward_log(stu_out, tea_out) + elif self.mode == "mean": + blank_mask = paddle.ones_like(stu_out) + blank_mask.stop_gradient = True + blank_mask[:, :, 0] = -1 + stu_out *= blank_mask + tea_out *= blank_mask + return self.forward_meanmax(stu_out, tea_out) + elif self.mode == "sum": + return self.forward_sum(stu_out, tea_out) + elif self.mode == "meanlog": + blank_mask = paddle.ones_like(stu_out) + blank_mask.stop_gradient = True + blank_mask[:, :, 0] = -1 + stu_out *= blank_mask + tea_out *= blank_mask + return self.forward_meanlog(stu_out, tea_out) + elif self.mode == "ctcdkd": + # ingore ctc blank logits + blank_mask = paddle.ones_like(stu_out) + blank_mask.stop_gradient = True + blank_mask[:, :, 0] = -1 + stu_out *= blank_mask + tea_out *= blank_mask + return self.ctc_dkd_loss(stu_out, tea_out, targets) + else: + raise ValueError("error!!!!!!") + + def forward_log(self, out1, out2): + if self.act is not None: + out1 = self.act(out1) + 1e-10 + out2 = self.act(out2) + 1e-10 + if self.use_log is True: + # for recognition distillation, log is needed for feature map + log_out1 = paddle.log(out1) + log_out2 = paddle.log(out2) + loss = ( + self._kldiv(log_out1, out2) + self._kldiv(log_out2, out1)) / 2.0 + + return loss + + +class DistillCTCLogits(KLCTCLogits): + def __init__(self, + model_name_pairs=[], + key=None, + name="ctc_logits", + reduction="mean"): + super().__init__(reduction=reduction) + self.model_name_pairs = self._check_model_name_pairs(model_name_pairs) + self.key = key + self.name = name + + def _check_model_name_pairs(self, model_name_pairs): + if not isinstance(model_name_pairs, list): + return [] + elif isinstance(model_name_pairs[0], list) and isinstance( + model_name_pairs[0][0], str): + return model_name_pairs + else: + return [model_name_pairs] + + def forward(self, predicts, batch): + loss_dict = dict() + for idx, pair in enumerate(self.model_name_pairs): + out1 = predicts[pair[0]] + out2 = predicts[pair[1]] + + if self.key is not None: + out1 = out1[self.key]['ctc'] + out2 = out2[self.key]['ctc'] + + ctc_label = batch[1] + loss = super().forward(out1, out2, ctc_label) + if isinstance(loss, dict): + for key in loss: + loss_dict["{}_{}_{}".format(self.name, model_name, + idx)] = loss[key] + else: + loss_dict["{}_{}".format(self.name, idx)] = loss + return loss_dict
gen_lable.py in paddleocr converts the icdar2015 data incorrectly ![image](https://user-images.githubusercontent.com/119492593/212064964-c7db056f-3209-41f6-a9e0-daa5bc790c61.png) The data I converted with gen_lable.py looks like this, and it seems wrong to me. Shouldn't it instead be: ch4_training_images/img_1.jpg [{"transcription": "Genaxis Theatre", "points": [[377, 117], [463, 117], [465, 130], [378, 130]
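For context, a minimal sketch of the conversion this report is about. It assumes the standard ICDAR2015 ground-truth layout (one `gt_img_N.txt` per image, each line `x1,y1,x2,y2,x3,y3,x4,y4,transcription`) and the PaddleOCR detection label format (`image_path<TAB>[{"transcription": ..., "points": ...}]`); the function name and paths are illustrative and not taken from gen_lable.py.

```python
# Hypothetical helper (not gen_lable.py): ICDAR2015 ground truth -> PaddleOCR
# detection label file. Assumes gt files are named gt_img_N.txt and images img_N.jpg.
import json
import os


def icdar2015_to_paddle_label(gt_dir, img_prefix, out_path):
    out_lines = []
    for gt_name in sorted(os.listdir(gt_dir)):
        img_name = gt_name.replace("gt_", "").replace(".txt", ".jpg")
        boxes = []
        # utf-8-sig strips the BOM that ICDAR2015 gt files usually carry
        with open(os.path.join(gt_dir, gt_name), encoding="utf-8-sig") as f:
            for row in f:
                parts = row.strip().split(",")
                if len(parts) < 9:
                    continue
                points = [[int(parts[i]), int(parts[i + 1])] for i in range(0, 8, 2)]
                text = ",".join(parts[8:])  # the transcription may itself contain commas
                boxes.append({"transcription": text, "points": points})
        out_lines.append("%s/%s\t%s" % (img_prefix, img_name,
                                        json.dumps(boxes, ensure_ascii=False)))
    with open(out_path, "w", encoding="utf-8") as f:
        f.write("\n".join(out_lines))
```

Each output line then has the shape the reporter expects: `ch4_training_images/img_1.jpg`, a tab, and the JSON list of boxes.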
2023-05-11T08:50:22
PaddlePaddle/PaddleOCR
9,968
PaddlePaddle__PaddleOCR-9968
[ "8855" ]
1643f268d320a6423be8e1b82d7f1b69dd041a12
diff --git a/ppocr/losses/distillation_loss.py b/ppocr/losses/distillation_loss.py --- a/ppocr/losses/distillation_loss.py +++ b/ppocr/losses/distillation_loss.py @@ -96,6 +96,96 @@ def _slice_out(self, outs): continue return new_outs + def forward(self, predicts, batch): + loss_dict = dict() + for idx, pair in enumerate(self.model_name_pairs): + out1 = predicts[pair[0]] + out2 = predicts[pair[1]] + if self.key is not None: + out1 = out1[self.key] + out2 = out2[self.key] + if self.maps_name is None: + if self.multi_head: + loss = super().forward(out1[self.dis_head], + out2[self.dis_head]) + else: + loss = super().forward(out1, out2) + if isinstance(loss, dict): + for key in loss: + loss_dict["{}_{}_{}_{}".format(key, pair[0], pair[1], + idx)] = loss[key] + else: + loss_dict["{}_{}".format(self.name, idx)] = loss + else: + outs1 = self._slice_out(out1) + outs2 = self._slice_out(out2) + for _c, k in enumerate(outs1.keys()): + loss = super().forward(outs1[k], outs2[k]) + if isinstance(loss, dict): + for key in loss: + loss_dict["{}_{}_{}_{}_{}".format(key, pair[ + 0], pair[1], self.maps_name, idx)] = loss[key] + else: + loss_dict["{}_{}_{}".format(self.name, self.maps_name[ + _c], idx)] = loss + + loss_dict = _sum_loss(loss_dict) + + return loss_dict + + +class DistillationKLDivLoss(KLDivLoss): + """ + """ + + def __init__(self, + model_name_pairs=[], + key=None, + multi_head=False, + dis_head='ctc', + maps_name=None, + name="kl_div"): + super().__init__() + assert isinstance(model_name_pairs, list) + self.key = key + self.multi_head = multi_head + self.dis_head = dis_head + self.model_name_pairs = self._check_model_name_pairs(model_name_pairs) + self.name = name + self.maps_name = self._check_maps_name(maps_name) + + def _check_model_name_pairs(self, model_name_pairs): + if not isinstance(model_name_pairs, list): + return [] + elif isinstance(model_name_pairs[0], list) and isinstance( + model_name_pairs[0][0], str): + return model_name_pairs + else: + return [model_name_pairs] + + def _check_maps_name(self, maps_name): + if maps_name is None: + return None + elif type(maps_name) == str: + return [maps_name] + elif type(maps_name) == list: + return [maps_name] + else: + return None + + def _slice_out(self, outs): + new_outs = {} + for k in self.maps_name: + if k == "thrink_maps": + new_outs[k] = outs[:, 0, :, :] + elif k == "threshold_maps": + new_outs[k] = outs[:, 1, :, :] + elif k == "binary_maps": + new_outs[k] = outs[:, 2, :, :] + else: + continue + return new_outs + def forward(self, predicts, batch): loss_dict = dict() for idx, pair in enumerate(self.model_name_pairs): @@ -141,6 +231,149 @@ def forward(self, predicts, batch): return loss_dict +class DistillationDKDLoss(DKDLoss): + """ + """ + + def __init__(self, + model_name_pairs=[], + key=None, + multi_head=False, + dis_head='ctc', + maps_name=None, + name="dkd", + temperature=1.0, + alpha=1.0, + beta=1.0): + super().__init__(temperature, alpha, beta) + assert isinstance(model_name_pairs, list) + self.key = key + self.multi_head = multi_head + self.dis_head = dis_head + self.model_name_pairs = self._check_model_name_pairs(model_name_pairs) + self.name = name + self.maps_name = self._check_maps_name(maps_name) + + def _check_model_name_pairs(self, model_name_pairs): + if not isinstance(model_name_pairs, list): + return [] + elif isinstance(model_name_pairs[0], list) and isinstance( + model_name_pairs[0][0], str): + return model_name_pairs + else: + return [model_name_pairs] + + def _check_maps_name(self, maps_name): + if maps_name is 
None: + return None + elif type(maps_name) == str: + return [maps_name] + elif type(maps_name) == list: + return [maps_name] + else: + return None + + def _slice_out(self, outs): + new_outs = {} + for k in self.maps_name: + if k == "thrink_maps": + new_outs[k] = outs[:, 0, :, :] + elif k == "threshold_maps": + new_outs[k] = outs[:, 1, :, :] + elif k == "binary_maps": + new_outs[k] = outs[:, 2, :, :] + else: + continue + return new_outs + + def forward(self, predicts, batch): + loss_dict = dict() + + for idx, pair in enumerate(self.model_name_pairs): + out1 = predicts[pair[0]] + out2 = predicts[pair[1]] + if self.key is not None: + out1 = out1[self.key] + out2 = out2[self.key] + if self.maps_name is None: + if self.multi_head: + # for nrtr dml loss + max_len = batch[3].max() + tgt = batch[2][:, 1:2 + + max_len] # [batch_size, max_len + 1] + + tgt = tgt.reshape([-1]) # batch_size * (max_len + 1) + non_pad_mask = paddle.not_equal( + tgt, paddle.zeros( + tgt.shape, + dtype=tgt.dtype)) # batch_size * (max_len + 1) + + loss = super().forward( + out1[self.dis_head], out2[self.dis_head], tgt, + non_pad_mask) # [batch_size, max_len + 1, num_char] + else: + loss = super().forward(out1, out2) + if isinstance(loss, dict): + for key in loss: + loss_dict["{}_{}_{}_{}".format(key, pair[0], pair[1], + idx)] = loss[key] + else: + loss_dict["{}_{}".format(self.name, idx)] = loss + else: + outs1 = self._slice_out(out1) + outs2 = self._slice_out(out2) + for _c, k in enumerate(outs1.keys()): + loss = super().forward(outs1[k], outs2[k]) + if isinstance(loss, dict): + for key in loss: + loss_dict["{}_{}_{}_{}_{}".format(key, pair[ + 0], pair[1], self.maps_name, idx)] = loss[key] + else: + loss_dict["{}_{}_{}".format(self.name, self.maps_name[ + _c], idx)] = loss + + loss_dict = _sum_loss(loss_dict) + + return loss_dict + + +class DistillationNRTRDMLLoss(DistillationDMLLoss): + """ + """ + + def forward(self, predicts, batch): + loss_dict = dict() + for idx, pair in enumerate(self.model_name_pairs): + out1 = predicts[pair[0]] + out2 = predicts[pair[1]] + if self.key is not None: + out1 = out1[self.key] + out2 = out2[self.key] + + if self.multi_head: + # for nrtr dml loss + max_len = batch[3].max() + tgt = batch[2][:, 1:2 + max_len] + tgt = tgt.reshape([-1]) + non_pad_mask = paddle.not_equal( + tgt, paddle.zeros( + tgt.shape, dtype=tgt.dtype)) + loss = super().forward(out1[self.dis_head], out2[self.dis_head], + non_pad_mask) + else: + loss = super().forward(out1, out2) + if isinstance(loss, dict): + for key in loss: + loss_dict["{}_{}_{}_{}".format(key, pair[0], pair[1], + idx)] = loss[key] + else: + loss_dict["{}_{}".format(self.name, idx)] = loss + + loss_dict = _sum_loss(loss_dict) + + return loss_dict + + class DistillationKLDivLoss(KLDivLoss): """ """
gen_lable.py in paddleocr converts the icdar2015 data incorrectly ![image](https://user-images.githubusercontent.com/119492593/212064964-c7db056f-3209-41f6-a9e0-daa5bc790c61.png) The data I converted with gen_lable.py looks like this, and it seems wrong to me. Shouldn't it instead be: ch4_training_images/img_1.jpg [{"transcription": "Genaxis Theatre", "points": [[377, 117], [463, 117], [465, 130], [378, 130]
2023-05-17T03:20:09
PaddlePaddle/PaddleOCR
11,405
PaddlePaddle__PaddleOCR-11405
[ "10649" ]
1b1dc7e44fa4cfbb83c53ee7a844d7f7b467b108
diff --git a/ppstructure/recovery/table_process.py b/ppstructure/recovery/table_process.py --- a/ppstructure/recovery/table_process.py +++ b/ppstructure/recovery/table_process.py @@ -253,18 +253,18 @@ def handle_table(self, html, doc): cols = get_table_columns(row) cell_col = 0 for col in cols: + if cell_col >= cols_len: + break + colspan = int(col.attrs.get('colspan', 1)) rowspan = int(col.attrs.get('rowspan', 1)) - cell_html = get_cell_html(col) if col.name == 'th': cell_html = "<b>%s</b>" % cell_html docx_cell = table.cell(cell_row, cell_col) - - while docx_cell.text != '': # Skip the merged cell - cell_col += 1 - docx_cell = table.cell(cell_row, cell_col) + if (cell_col + colspan -1) >= cols_len: + colspan -= 1 cell_to_merge = table.cell(cell_row + rowspan - 1, cell_col + colspan - 1)
PPstructure table recognition error Please provide the following information to quickly locate the problem **System Environment:** Windows 10 Home (Chinese edition) 22H2: 1060MaxQ + CUDA 11.6 **Version:** Python: 3.9, anaconda Paddle: paddlepaddle-gpu==2.5.1.post116 PaddleOCR: 2.7 Related components: PPStructure layout recovery **Command Code:** python predict_system.py --image_dir=3.pdf --det_model_dir=inference/ch_PP-OCRv4_det_infer --rec_model_dir=inference/ch_PP-OCRv4_rec_infer --rec_char_dict_path=../ppocr/utils/ppocr_keys_v1.txt --table_model_dir=inference/ch_ppstructure_mobile_v2.0_SLANet_infer --table_char_dict_path=../ppocr/utils/dict/table_structure_dict_ch.txt --layout_model_dir=inference/picodet_lcnet_x1_0_fgd_layout_cdla_infer --layout_dict_path=../ppocr/utils/dict/layout_dict/layout_cdla_dict.txt --vis_font_path=../doc/fonts/simfang.ttf --recovery=True --output=./output/ --use_gpu=False **Complete Error Message:** The table in the PDF document looks like this: ![image](https://github.com/PaddlePaddle/PaddleOCR/assets/43161566/c09c9e6f-40ed-48b0-af03-09f87f66c326) With layout recovery enabled, the run ends with ppocr ERROR: error in layout recovery image:1.pdf, err msg: list index out of range ![image](https://github.com/PaddlePaddle/PaddleOCR/assets/43161566/85848adc-abfe-488d-b3a1-7a1f237ec024) An output file is still produced ![image](https://github.com/PaddlePaddle/PaddleOCR/assets/43161566/b80fe2ab-f87e-4497-9ef1-01b6717f5a86) but the problem appears at line 292 of predict_system.py, when convert_info_docx(img, all_res, save_folder, img_name) is called **Problem localization:** The cause is that the recognized table row/column counts are inconsistent. From predict_system.py, convert_info_docx leads to line 63 of recovery_to_doc.py, parser.handle_table(region['res']['html'], doc), which calls def handle_table(self, html, doc) at line 238 of recovery/table_process.py. The res recognized for the table: ![image](https://github.com/PaddlePaddle/PaddleOCR/assets/43161566/838a52b7-c36e-4fc2-b695-548e8ada3cbd) The html it produces is as follows: ![image](https://github.com/PaddlePaddle/PaddleOCR/assets/43161566/b7d1f2e6-d862-479f-be2a-b93ea048f3dc) In handle_table, the extracted cols_len = 4, yet colspan = 5 wrongly appears in the table html, so the function hits the list index out of range ![image](https://github.com/PaddlePaddle/PaddleOCR/assets/43161566/a29b29d3-0466-4e23-80bd-8c0b6d1e2587) This colspan=5 is produced during table structure analysis; I cannot solve it myself and need help
Same problem here, has it been solved? > Same problem here … @xxcoco763 After checking, it seems to be a model problem: on complex tables the model is not accurate enough, so it gets the row/column counts wrong. Have a look at https://blog.csdn.net/weixin_44451785/article/details/105888966 and handle such tables manually for now. The link above is broken; are there other articles or ways to deal with colspan, rowspan? @nissansz The link was just formatted badly; drop everything after the comma. Or search directly for "python对图片中的表格拆分" (splitting tables in images with Python). I have read that article. Sometimes the table lines are broken, and various distortions make the projection method fail; is there a way to repair the table lines? Also, when I train a table recognition model with Paddle, colspan and rowspan are sometimes dropped at test time on some images. What causes this and how do I fix it? ![image](https://github.com/PaddlePaddle/PaddleOCR/assets/41010669/91576c08-cd21-4b67-90e6-caaee51a8558) @nissansz For scanned documents with broken table lines you can try image enhancement with OpenCV, for example morphological dilation to thicken the lines. If the photo itself is distorted you may have to repair it manually in something like Photoshop. As for colspan/rowspan, that is probably the model not being accurate enough on complex tables; the only real fix is to fine-tune the model with your own data, and I have not found a better way either. With dilation to thicken the table lines, how do you decide that all the table lines have been found? There is probably no good answer; it comes down to OpenCV plus parameter tuning, and different documents may need different settings, for example detecting the lines with a Hough transform, where quite a few parameters have to be tried by hand. The error is probably raised when the last row is merged; adding one check at line 269 fixes it: while docx_cell.text != '': # Skip the merged cell cell_col += 1 docx_cell = table.cell(cell_row, cell_col) if cell_row + rowspan - 1 == len(rows): rowspan -= 1 cell_to_merge = table.cell(cell_row + rowspan - 1, cell_col + colspan - 1) > The error is probably raised when the last row is merged … Which .py file is that? > > … ![微信截图_20231109193911](https://github.com/PaddlePaddle/PaddleOCR/assets/38805230/185840f3-df5a-498a-b184-f9fcd46e1459) table_process.py This is not the table failing to be recognized; the error happens while restoring the result to a Word document. There are other restoration methods online you can look for. I found that the error comes from extracting the table into html: colspan = int(col.attrs.get('colspan', 1)) and rowspan = int(col.attrs.get('rowspan', 1)) do not match the real table, which causes the list index error. My current workaround is, when the error occurs, to use rowspan = int(col.attrs.get('rowspan', 1)) - 1 or colspan = int(col.attrs.get('colspan', 1)) - 1; so far I have only seen spans that are too large by 1. When I train a table model, spans are sometimes there and sometimes disappear, and the results are never as good as the official model. Even stranger, even the simplest tables are hard to get right. > When I train a table model … Plain table recognition is fine; one problem is that when converting the PDF to images, mat = fitz.Matrix(2, 2) uses a factor of 2, which blurs the image. Print the image and check; if it is very blurry you need to change the zoom factor, and the shrink under if pm.width > 2000 or pm.height > 2000: must not be applied either. Is there a trained model you can share? > Is there a trained model you can share? No, I have not trained one either. With CRNN training, English recognition is poor and the spaces between English words are missing; is there a way around this? Are there any good parameter settings?
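The guard discussed in the last few comments, and the table_process.py diff above, comes down to never letting a predicted colspan or rowspan index past the real table size. A standalone sketch of that idea, with illustrative names rather than the exact PaddleOCR code:

```python
# Sketch only: clamp spans predicted in the table HTML before merging docx cells,
# so a colspan=5 on a 4-column table cannot raise "list index out of range".
def merge_cell_safely(table, cell_row, cell_col, rowspan, colspan,
                      rows_len, cols_len):
    if cell_row >= rows_len or cell_col >= cols_len:
        return  # the model predicted a cell the table does not have
    rowspan = min(rowspan, rows_len - cell_row)   # clamp to remaining rows
    colspan = min(colspan, cols_len - cell_col)   # clamp to remaining columns
    src = table.cell(cell_row, cell_col)
    dst = table.cell(cell_row + rowspan - 1, cell_col + colspan - 1)
    if src is not dst:
        src.merge(dst)  # python-docx merge over the clamped span
```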
2023-12-21T10:13:54
PaddlePaddle/PaddleOCR
11,916
PaddlePaddle__PaddleOCR-11916
[ "10270" ]
c82dd6406eecb3c0840b26b055b436a883128307
diff --git a/ppstructure/predict_system.py b/ppstructure/predict_system.py --- a/ppstructure/predict_system.py +++ b/ppstructure/predict_system.py @@ -58,7 +58,6 @@ def __init__(self, args): logger.warning( "When args.layout is false, args.ocr is automatically set to false" ) - args.drop_score = 0 # init model self.layout_predictor = None self.text_system = None @@ -93,6 +92,7 @@ def __call__(self, img, return_ocr_result_in_table=False, img_idx=0): 'all': 0 } start = time.time() + if self.image_orientation_predictor is not None: tic = time.time() cls_result = self.image_orientation_predictor.predict( @@ -108,6 +108,7 @@ def __call__(self, img, return_ocr_result_in_table=False, img_idx=0): img = cv2.rotate(img, cv_rotate_code[angle]) toc = time.time() time_dict['image_orientation'] = toc - tic + if self.mode == 'structure': ori_im = img.copy() if self.layout_predictor is not None: @@ -116,6 +117,20 @@ def __call__(self, img, return_ocr_result_in_table=False, img_idx=0): else: h, w = ori_im.shape[:2] layout_res = [dict(bbox=None, label='table')] + + # As reported in issues such as #10270 and #11665, the old + # implementation, which recognizes texts from the layout regions, + # has problems with OCR recognition accuracy. + # + # To enhance the OCR recognition accuracy, we implement a patch fix + # that first use text_system to detect and recognize all text information + # and then filter out relevant texts according to the layout regions. + text_res = None + if self.text_system is not None: + text_res, ocr_time_dict = self._predict_text(img) + time_dict['det'] += ocr_time_dict['det'] + time_dict['rec'] += ocr_time_dict['rec'] + res_list = [] for region in layout_res: res = '' @@ -126,6 +141,8 @@ def __call__(self, img, return_ocr_result_in_table=False, img_idx=0): else: x1, y1, x2, y2 = 0, 0, w, h roi_img = ori_im + bbox = [x1, y1, x2, y2] + if region['label'] == 'table': if self.table_system is not None: res, table_time_dict = self.table_system( @@ -135,67 +152,83 @@ def __call__(self, img, return_ocr_result_in_table=False, img_idx=0): time_dict['det'] += table_time_dict['det'] time_dict['rec'] += table_time_dict['rec'] else: - if self.text_system is not None: - if self.recovery: - wht_im = np.ones(ori_im.shape, dtype=ori_im.dtype) - wht_im[y1:y2, x1:x2, :] = roi_img - filter_boxes, filter_rec_res, ocr_time_dict = self.text_system( - wht_im) - else: - filter_boxes, filter_rec_res, ocr_time_dict = self.text_system( - roi_img) - time_dict['det'] += ocr_time_dict['det'] - time_dict['rec'] += ocr_time_dict['rec'] - - # remove style char, - # when using the recognition model trained on the PubtabNet dataset, - # it will recognize the text format in the table, such as <b> - style_token = [ - '<strike>', '<strike>', '<sup>', '</sub>', '<b>', - '</b>', '<sub>', '</sup>', '<overline>', - '</overline>', '<underline>', '</underline>', '<i>', - '</i>' - ] - res = [] - for box, rec_res in zip(filter_boxes, filter_rec_res): - rec_str, rec_conf = rec_res[0], rec_res[1] - for token in style_token: - if token in rec_str: - rec_str = rec_str.replace(token, '') - if not self.recovery: - box += [x1, y1] - if self.return_word_box: - word_box_content_list, word_box_list = cal_ocr_word_box(rec_str, box, rec_res[2]) - res.append({ - 'text': rec_str, - 'confidence': float(rec_conf), - 'text_region': box.tolist(), - 'text_word': word_box_content_list, - 'text_word_region': word_box_list - }) - else: - res.append({ - 'text': rec_str, - 'confidence': float(rec_conf), - 'text_region': box.tolist() - }) + if text_res is 
not None: + # Filter the text results whose regions intersect with the current layout bbox. + res = self._filter_text_res(text_res, bbox) + res_list.append({ 'type': region['label'].lower(), - 'bbox': [x1, y1, x2, y2], + 'bbox': bbox, 'img': roi_img, 'res': res, 'img_idx': img_idx }) + end = time.time() time_dict['all'] = end - start return res_list, time_dict + elif self.mode == 'kie': re_res, elapse = self.kie_predictor(img) time_dict['kie'] = elapse time_dict['all'] = elapse return re_res[0], time_dict + return None, None + def _predict_text(self, img): + filter_boxes, filter_rec_res, ocr_time_dict = self.text_system(img) + + # remove style char, + # when using the recognition model trained on the PubtabNet dataset, + # it will recognize the text format in the table, such as <b> + style_token = [ + '<strike>', '<strike>', '<sup>', '</sub>', '<b>', + '</b>', '<sub>', '</sup>', '<overline>', + '</overline>', '<underline>', '</underline>', '<i>', + '</i>' + ] + res = [] + for box, rec_res in zip(filter_boxes, filter_rec_res): + rec_str, rec_conf = rec_res[0], rec_res[1] + for token in style_token: + if token in rec_str: + rec_str = rec_str.replace(token, '') + if self.return_word_box: + word_box_content_list, word_box_list = cal_ocr_word_box(rec_str, box, rec_res[2]) + res.append({ + 'text': rec_str, + 'confidence': float(rec_conf), + 'text_region': box.tolist(), + 'text_word': word_box_content_list, + 'text_word_region': word_box_list + }) + else: + res.append({ + 'text': rec_str, + 'confidence': float(rec_conf), + 'text_region': box.tolist() + }) + return res, ocr_time_dict + + def _filter_text_res(self, text_res, bbox): + res = [] + for r in text_res: + box = r['text_region'] + rect = box[0][0], box[0][1], box[2][0], box[2][1] + if self._has_intersection(bbox, rect): + res.append(r) + return res + + def _has_intersection(self, rect1, rect2): + x_min1, y_min1, x_max1, y_max1 = rect1 + x_min2, y_min2, x_max2, y_max2 = rect2 + if x_min1 > x_max2 or x_max1 < x_min2: + return False + if y_min1 > y_max2 or y_max1 < y_min2: + return False + return True + def save_structure_res(res, save_folder, img_name, img_idx=0): excel_save_folder = os.path.join(save_folder, img_name)
In the results from PPStructure layout analysis, the OCR result inside a bbox is missing its last line Please provide the following information to quickly locate the problem - System Environment: - Version: Paddle: PaddleOCR: Related components: - Command Code: - Complete Error Message: ![屏幕截图 2023-06-30 185505](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/f2159e3f-4075-4e69-8fd2-ebd6e2f301d2) As the visualization of this text block from the page layout analysis shows, its OCR result is missing the last line ![屏幕截图 2023-06-30 185951](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/a3b1568f-c0b5-498f-9fa9-4fe4b4f6c650) Overall the layout analysis itself looks reasonable, but why are the OCR results for the text blocks so poor? Many text blocks have no OCR result at all, and even the ones that do are missing their last line
After a simple visualization of the image of each block in the layout result, many of the block images clearly contain text, yet the OCR result either finds no text at all or drops lines ![屏幕截图 2023-07-05 150940](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/66e2a7b4-abf3-4128-8c08-d2ffe890a59e) I have the same problem, asking as well https://github.com/PaddlePaddle/PaddleOCR/issues/10097 I took the image of the bottom-left text block out on its own and ran paddleocr on it, and it surprisingly cannot recognize even this simplest pure-text case ![屏幕截图 2023-07-05 160016](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/2c7fb269-0e81-4d41-a1c9-ba50b239b632) ![result 0](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/0fa2d171-1c80-41f5-9874-128dc1cd715e) After testing, the conclusion is that OCR performance is strongly tied to image resolution: raising the per-dimension zoom factor when converting the PDF to images gives higher-resolution images and better OCR, but the result depends on the zoom factor; is there a recommended setting? `import fitz # fitz is what pip install PyMuPDF provides def pdf2img(pdf_path, image_dir): pdfDoc = fitz.open(pdf_path) for pg in range(pdfDoc.pageCount): page = pdfDoc[pg] rotate = int(0) # a zoom factor of 4 per dimension generates images at 4x the resolution. # without this setting the default image size is 792X612, dpi=96 zoom_x = 4 # (1.33333333-->1056x816) (2-->1584x1224) zoom_y = 4 mat = fitz.Matrix(zoom_x, zoom_y).preRotate(rotate) pix = page.getPixmap(matrix=mat, alpha=False) if not os.path.exists(image_path): # check whether the folder for the images exists os.makedirs(image_path) # create it if it does not pix.writePNG(image_path + '/' + 'images_%s.png' % pg) # write the image into the given folder ` Based on these experiments: during PPStructure layout analysis the PDF is first converted to images, and different resolutions make the OCR unstable; the underlying OCR model is not robust to resolution. The original PDF document: [基于会话的推荐方法综述.pdf](https://github.com/PaddlePaddle/PaddleOCR/files/11963657/default.pdf) Taking page 3 as the test example, - Running paddleocr directly recognizes all the text: ![result_1_ocr](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/c9349ace-650a-4de0-8851-24efb5f88a11) - With PPStructure, the OCR results after layout analysis are very poor: ![result_1](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/c555d1c0-38a0-416b-a781-851f113d0f90) - The 'img' field of the PPStructure result holds the image of each layout block; saving the image of the bottom-left text block: ![result_1 0](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/c9dc436c-b834-4a5a-a8a5-c373e28bc395) - Running paddleocr on it alone recognizes nothing at all: ![result_1 0 _ocr](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/b8c4e28b-9bca-4eca-9015-cab489f0404b) Testing with a PDF-to-image zoom factor of 2 per dimension, - Running paddleocr directly recognizes all the text: ![result_2_ocr](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/e2acbbef-0ce3-4bc3-94c3-976fd6be03ba) - With PPStructure, the OCR results improve, but the last line of the middle text block in the left column is recognized incorrectly and the bottom two text blocks lose their last line: ![result_2](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/f8f30ccc-ab6e-42ae-844d-d9ff61bc1558) - The 'img' field of the PPStructure result holds the image of each layout block; saving the image of the bottom-left text block: ![result_2 0](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/074721ac-bc63-4f46-8c34-401c7b981783) - Running paddleocr on it alone loses the last line: ![result_2 0 _ocr](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/9fd2dd3a-1333-4f11-9e72-6609ce3bf340) Testing with a PDF-to-image zoom factor of 4 per dimension, - Running paddleocr directly recognizes all the text: ![result_4_ocr](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/30aa8ff5-a394-4fb7-a6d9-4b938f60ba43) - With PPStructure, the OCR results improve, although the title that could be recognized before is no longer recognized at the higher resolution ![result_4](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/e6312049-2bbc-47da-90a2-87d2fc69c8b6) - The 'img' field of the PPStructure result holds the image of each layout block; saving the image of the bottom-left text block: ![result_4 0](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/3fd1c785-7766-4896-aaf5-38365cf50daa) - Running paddleocr on it alone mostly works: ![result_4 0 _ocr](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/8c6fdbba-87dd-405a-8633-a56ab88494f5) - Testing with a PDF-to-image zoom factor of 8 per dimension, - Running paddleocr directly recognizes all the text: ![result_8_ocr](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/1a557667-451f-4e37-b140-98d25eff1c4c) - With PPStructure, the OCR results after layout analysis are mostly correct: ![result_8](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/1e6eb838-0a2a-4124-91b5-078c2c8bfe71) - The 'img' field of the PPStructure result holds the image of each layout block; saving the image of the bottom-left text block: ![result_8 0](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/68db005c-41c7-47d5-9695-2f2d1777df7d) - Running paddleocr on it alone mostly works: ![result_8 0 _ocr](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/ed541e03-e0d3-4df8-8ce7-8b0a41cc7a3b) Although a larger zoom factor can be used when converting the PDF to images, it costs more time. And in every zoom experiment, running paddleocr directly on the full PDF page recognizes essentially all the text, while the OCR done inside the PPStructure layout-analysis pipeline is still very poor; how can this be solved? > After testing, the conclusion is that OCR performance is strongly tied to image resolution … but the OCR done inside the PPStructure layout-analysis pipeline is still very poor; how can this be solved? - I manually cropped an image containing the bottom-left text block with some white space around it, so that OCR's enlarging preprocessing would not distort the pixels; paddleocr on its own can then mostly recognize it: ![屏幕截图 2023-07-06 151852](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/6909fdce-717d-49cc-ac53-6675feab2108) ![result 0 _screen_ocr](https://github.com/PaddlePaddle/PaddleOCR/assets/25731261/2e00dc43-2968-474a-9191-f3f6927075c4) My guess is that after PPStructure has the layout result it runs OCR on each block image with an enlarging preprocessing step that distorts the pixels, and OCR is not robust to that scaling distortion. So how can OCR's image preprocessing be changed so that it does not apply a pixel-distorting resize? Is there a parameter that controls how OCR resizes the input image? make mark can someone share sample code to get layout analysis using different models present at https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6/ppstructure/docs/models_list_en.md > can someone share sample code … you can download model, then change model dir: https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6/ppstructure/layout/README.md#72-model-inference also you can use parameter layout_model_dir of method PPStructure: https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.6/ppstructure/docs/quickstart_en.md#223-layout-analysis @lycfight , its not working I shared the issue here - https://github.com/PaddlePaddle/PaddleOCR/issues/10476 please take a look mark mark I hit the same problem; how do you do the PDF-to-image conversion and scaling? I read the PDF with fitz to get each page, take each Page's pixmap, and then get the image with cv via iobytes. > I hit the same problem … But with the V3 models, PPStructure's OCR step works somewhat better than with the v4 models. > I hit the same problem … It is the scaling inside PPStructure's built-in OCR that causes it; that has to wait for an official fix. My compromise is to detect only the bbox of each block, skip PPStructure's built-in OCR, and use a separate PaddleOCR to get the bbox of every text line, then combine and filter the two. > … Which parameter is the built-in OCR's scaling parameter? I also hit this problem and don't know what to change. > … The scaling is only part of the post-processing of PPStructure's built-in layout analysis; when OCR misses text, enlarging can sometimes recover a bit but is not reliable. I did not use scaling; as in the previous reply, I run OCR separately and filter it against the layout blocks. I hit the same problem too, and following @lycfight's idea (thanks 🙏) I made an unofficial fix of PaddleOCR: - Branch: https://github.com/RussellLuo/PaddleOCR/tree/hotfix-ppstructure-ocr - Changes: https://github.com/PaddlePaddle/PaddleOCR/compare/release/2.7...RussellLuo:PaddleOCR:hotfix-ppstructure-ocr Install the fixed version: ```bash pip install git+https://github.com/RussellLuo/PaddleOCR.git@hotfix-ppstructure-ocr ``` Usage: ```bash # official command: paddleocr --image_dir=ppstructure/docs/table/1.png --type=structure paddleocr --image_dir=ppstructure/docs/table/1.png --type=structurex ``` If you have hit this problem as well, I hope this fixed version helps. In the end, an official solution is still what we are waiting for! Does your fixed version lift the PyMuPDF version restriction? I have something that needs 1.23.1, but PaddleOCR conflicts with it. mark My guess is that the zoom factor used when converting the PDF to an image makes the resolution too low; modify ppcor.utils.utility.py starting at line 109: elif os.path.basename(img_path)[-3:].lower() == 'pdf': import fitz from PIL import Image, ImageFilter imgs = [] with fitz.open(img_path) as pdf: for pg in range(0, pdf.page_count): page = pdf[pg] ## To raise the resolution of the PDF-to-image conversion, adjust the zoom factor in fitz.Matrix. The factor in the code is currently 2, i.e. the image resolution is twice the PDF's. # mat = fitz.Matrix(2, 2) mat = fitz.Matrix(1, 1) pm = page.get_pixmap(matrix=mat, alpha=False) # if width and height > 2000 pixels, don't enlarge the image if pm.width > 2000 and pm.height > 2000: pm = page.get_pixmap(matrix=fitz.Matrix(1, 1), alpha=False) else: scale = int(max(2000/pm.width,2000/pm.height)) pm = page.get_pixmap(matrix=fitz.Matrix(scale, scale), alpha=False) print('convert PDF to image with width {} , height {}'.format(pm.width,pm.height)) img = Image.frombytes("RGB", [pm.width, pm.height], pm.samples) # apply a sharpening filter img = img.filter(ImageFilter.SHARPEN) img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) imgs.append(img) return imgs, False, True > If you have hit this problem as well, I hope this fixed version helps … PaddleOCR is in the middle of transitioning from a company-run open source project to a fully community-driven one. From now on, the community is the official channel. 😄 There is currently a community effort to resolve long standing issues: #11906. If you are interested, you can try to upstream your change to the PaddleOCR repository (main branch). @jzhang533 Thanks for the recognition, and I am honored to contribute to the PaddleOCR community! I have submitted a PR: #11916 🚀
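The workaround described in the later comments, running detection and recognition once on the whole page and then keeping for each layout region only the text boxes that overlap it, is what this record's predict_system.py patch implements with `_filter_text_res` and `_has_intersection`. A minimal, framework-independent sketch of the filtering step (the rectangle format is an assumption):

```python
# Rectangles are (x_min, y_min, x_max, y_max) in page pixel coordinates.
def has_intersection(rect1, rect2):
    x_min1, y_min1, x_max1, y_max1 = rect1
    x_min2, y_min2, x_max2, y_max2 = rect2
    if x_min1 > x_max2 or x_max1 < x_min2:
        return False
    if y_min1 > y_max2 or y_max1 < y_min2:
        return False
    return True


def assign_lines_to_regions(text_lines, regions):
    """text_lines: list of (rect, text) from full-page OCR;
    regions: list of layout rects. Returns {region_index: [text, ...]}."""
    assigned = {i: [] for i in range(len(regions))}
    for rect, text in text_lines:
        for i, region in enumerate(regions):
            if has_intersection(rect, region):
                assigned[i].append(text)
    return assigned
```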
2024-04-11T07:07:38
vispy/vispy
151
vispy__vispy-151
[ "144" ]
0b2daaa0c1e263e43e85a549f740fe269666d017
diff --git a/make/make.py b/make/make.py --- a/make/make.py +++ b/make/make.py @@ -168,8 +168,12 @@ def flake(self, arg): """ Run flake8 to find style inconsistencies. """ os.chdir(ROOT_DIR) sys.argv[1:] = ['vispy', 'examples', 'make'] - from flake8.main import main - main() + try: + from flake8.main import main + except ImportError: + print('Skipping flake8 test, flake8 not installed') + else: + main() def images(self, arg): """ Create images (screenshots). Subcommands: diff --git a/vispy/app/backends/__init__.py b/vispy/app/backends/__init__.py --- a/vispy/app/backends/__init__.py +++ b/vispy/app/backends/__init__.py @@ -62,14 +62,13 @@ def has_qt(requires_uic=False, return_which=False): def has_pyglet(return_which=False): try: - import pyglet # noqa + from . import _pyglet # noqa except: which = None has = False - pass else: has = True - which = 'pyglet ' + str(pyglet.version) + which = 'pyglet ' + str(_pyglet.version) if return_which: out = (has, which) else: diff --git a/vispy/app/backends/_pyglet.py b/vispy/app/backends/_pyglet.py --- a/vispy/app/backends/_pyglet.py +++ b/vispy/app/backends/_pyglet.py @@ -6,9 +6,16 @@ vispy backend for pyglet. """ -# absolute import is important here, since this module is called pyglet :) from __future__ import division +from distutils.version import LooseVersion +import pyglet +version = pyglet.version + +if LooseVersion(version) < LooseVersion('1.2'): + raise ImportError('Pyglet version too old (%s), need >= 1.2' + % pyglet.version) + import pyglet.window import pyglet.app import pyglet.clock
Pyglet needs 1.2 to run In the Pyglet backend we use `platform_event_loop`, which is only available since 1.2, which has alpha status. We cannot use the older `event_loop` since it does not allow processing events without entering the event loop. My proposal is to check the pyglet version and fail the pyglet backend import with a useful warning. Having pyglet is useful, because Travis uses it; I was just in a situation where I was unable to reproduce Travis' errors without Pyglet. BTW: In Travis we install from `http://pyglet.googlecode.com/archive/tip.zip`.
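The patch above does exactly this: the backend module raises ImportError when the installed pyglet predates 1.2, and the capability probe then reports the backend as unavailable. A stripped-down sketch of the same pattern, with illustrative function names:

```python
# Version-gating an optional backend: refuse to import it when too old, and let
# the probe report it as missing instead of failing later at runtime.
from distutils.version import LooseVersion


def _import_pyglet(min_version='1.2'):
    import pyglet
    if LooseVersion(pyglet.version) < LooseVersion(min_version):
        raise ImportError('Pyglet version too old (%s), need >= %s'
                          % (pyglet.version, min_version))
    return pyglet


def has_pyglet():
    try:
        pyglet = _import_pyglet()
    except ImportError:
        return False, None
    return True, 'pyglet ' + str(pyglet.version)
```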
2014-02-27T20:32:27
vispy/vispy
154
vispy__vispy-154
[ "153" ]
6b1c6c3b02af75b688f54bad3d07c76c9c526e3c
diff --git a/vispy/util/__init__.py b/vispy/util/__init__.py --- a/vispy/util/__init__.py +++ b/vispy/util/__init__.py @@ -7,5 +7,6 @@ """ from .misc import (_TempDir, is_string, parse_command_line_arguments, # noqa - config, sys_info) # noqa + config, sys_info, assert_in, assert_not_in, # noqa + assert_is) # noqa from ._logging import logger, set_log_level, use_log_level # noqa diff --git a/vispy/util/misc.py b/vispy/util/misc.py --- a/vispy/util/misc.py +++ b/vispy/util/misc.py @@ -257,3 +257,60 @@ def sys_info(fname=None, overwrite=False): with open(fname, 'w') as fid: fid.write(out) return out + + +# Adapted from Python's unittest2 (which is wrapped by nose) +# http://docs.python.org/2/license.html +def _safe_rep(obj, short=False): + """Helper for assert_* ports""" + try: + result = repr(obj) + except Exception: + result = object.__repr__(obj) + if not short or len(result) < 80: + return result + return result[:80] + ' [truncated]...' + + +def _safe_str(obj): + """Helper for assert_* ports""" + try: + return str(obj) + except Exception: + return object.__str__(obj) + + +def _format_msg(msg, std_msg): + """Helper for assert_* ports""" + if msg is None: + msg = std_msg + else: + try: + msg = '%s : %s' % (std_msg, msg) + except UnicodeDecodeError: + msg = '%s : %s' % (_safe_str(std_msg), _safe_str(msg)) + + +def assert_in(member, container, msg=None): + """Backport for old nose.tools""" + if member in container: + return + std_msg = '%s not found in %s' % (_safe_rep(member), _safe_rep(container)) + msg = _format_msg(msg, std_msg) + raise AssertionError(msg) + + +def assert_not_in(member, container, msg=None): + """Backport for old nose.tools""" + if member not in container: + return + std_msg = '%s found in %s' % (_safe_rep(member), _safe_rep(container)) + msg = _format_msg(msg, std_msg) + raise AssertionError(msg) + + +def assert_is(expr1, expr2, msg=None): + """Backport for old nose.tools""" + if expr1 is not expr2: + std_msg = '%s is not %s' % (_safe_rep(expr1), _safe_rep(expr2)) + raise AssertionError(_format_msg(msg, std_msg))
diff --git a/vispy/app/tests/test_app.py b/vispy/app/tests/test_app.py --- a/vispy/app/tests/test_app.py +++ b/vispy/app/tests/test_app.py @@ -11,6 +11,7 @@ ElementBuffer) from vispy.gloo.shader import VertexShader, FragmentShader, ShaderError from vispy.gloo import _screenshot +from vispy.util import assert_in, assert_is def on_nonexist(self, *args): @@ -68,7 +69,7 @@ def _test_application(backend): app.process_events() if backend is not None: # "in" b/c "qt" in "PySide (qt)" - assert_true(backend in app.backend_name) + assert_in(backend, app.backend_name) print(app) # test __repr__ # Canvas @@ -76,7 +77,7 @@ def _test_application(backend): # Use "with" statement so failures don't leave open window # (and test context manager behavior) with Canvas(title='me', app=app, show=True, position=pos) as canvas: - assert_true(canvas.app is app) + assert_is(canvas.app, app) assert_true(canvas.native) print(canvas.size >= (1, 1)) canvas.resize(90, 90) diff --git a/vispy/util/tests/test_logging.py b/vispy/util/tests/test_logging.py --- a/vispy/util/tests/test_logging.py +++ b/vispy/util/tests/test_logging.py @@ -3,7 +3,8 @@ import logging from vispy import app -from vispy.util import logger, use_log_level, sys_info, _TempDir +from vispy.util import (logger, use_log_level, sys_info, _TempDir, assert_in, + assert_not_in) temp_dir = _TempDir() @@ -26,14 +27,14 @@ def test_debug_logging(): a.use() a.quit() assert_equal(len(l), 1) - assert_true('vispy.app.application' in l[0]) + assert_in('vispy.app.application', l[0]) with use_log_level('debug', record=True) as l: a = app.Application() a.use() a.quit() assert_equal(len(l), 1) - assert_true('vispy.app.application' in l[0]) + assert_in('vispy.app.application', l[0]) with use_log_level('debug', 'foo', True) as l: a = app.Application() @@ -46,7 +47,7 @@ def test_debug_logging(): a.use() a.quit() assert_equal(len(l), 1) - assert_true('vispy.app.application' not in l[0]) + assert_not_in('vispy.app.application', l[0]) def test_sys_info():
Why is there no assert_in in nose.tools on py2.7? This makes me jump through hoops when writing tests.
`from nose.tools import assert_in` fails Don't know. In a different package, we wrote a wrapper and put it in the equivalent of `util/misc.py`. I'll open a PR quickly. I assume the hoops you mean are having to write `assert_true(x in y)` as opposed to `assert_in(x, y)`? Yes. The latter gives much better feedback when it fails, in contrast to "False is not True" :) Backporting now...
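For illustration only, a small example of what the backported helpers buy once the patch above is in place; the exact failure text is an assumption based on the patch's message template:

```python
from vispy.util import assert_in  # backported helper added by the patch above

menu = ['spam', 'eggs']
assert_in('eggs', menu)   # passes silently

# assert_true('bacon' in menu) would only report "False is not True";
# assert_in fails with a message along the lines of:
#   AssertionError: 'bacon' not found in ['spam', 'eggs']
assert_in('bacon', menu)
```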
2014-02-28T17:23:37
vispy/vispy
212
vispy__vispy-212
[ "211" ]
5b686ff3750744af5709aa76692ca47ac7c201a0
diff --git a/vispy/app/backends/_glfw.py b/vispy/app/backends/_glfw.py --- a/vispy/app/backends/_glfw.py +++ b/vispy/app/backends/_glfw.py @@ -80,7 +80,7 @@ } _VP_GLFW_ALL_WINDOWS = [] -_VP_GLFW_DO_DRAW = False +_VP_GLFW_DO_DRAW = [] MOD_KEYS = [keys.SHIFT, keys.ALT, keys.CONTROL, keys.META] @@ -100,6 +100,7 @@ class ApplicationBackend(BaseApplicationBackend): def __init__(self): BaseApplicationBackend.__init__(self) self._timers = list() + self._running = False def _add_timer(self, timer): if timer not in self._timers: @@ -109,20 +110,27 @@ def _vispy_get_backend_name(self): return 'Glfw' def _vispy_process_events(self): - wins = _get_glfw_windows() - for win in wins: - glfw.glfwPollEvents(win._id) - + #wins = _get_glfw_windows() + #for win in wins: + # glfw.glfwPollEvents(win._id) + glfw.glfwPollEvents() + while _VP_GLFW_DO_DRAW: + win = _VP_GLFW_DO_DRAW.pop(0) + win._on_draw() + def _vispy_run(self): win = _get_glfw_windows(check=True)[0] - global _VP_GLFW_DO_DRAW - while win._id is not None and not glfw.glfwWindowShouldClose(win._id): - if _VP_GLFW_DO_DRAW: - win._on_draw() - glfw.glfwPollEvents() - self._vispy_quit() + self._running = True + while (self._running and + win._id is not None and + not glfw.glfwWindowShouldClose(win._id)): + self._vispy_process_events() + self._vispy_quit() # to clean up def _vispy_quit(self): + # Mark as quit + self._running = False + # Close windows wins = _get_glfw_windows() for win in wins: win._vispy_close() @@ -227,10 +235,9 @@ def _vispy_update(self): # Invoke a redraw, passing it on to the canvas if self._vispy_canvas is None or self._id is None: return - # XXX HACKISH SOLUTION - global _VP_GLFW_DO_DRAW - _VP_GLFW_DO_DRAW = True - #self._on_draw(self._id) + # Mark that this window wants to be painted on the next loop iter + if self not in _VP_GLFW_DO_DRAW: + _VP_GLFW_DO_DRAW.append(self) def _vispy_close(self): # Force the window or widget to shut down diff --git a/vispy/app/backends/_glut.py b/vispy/app/backends/_glut.py --- a/vispy/app/backends/_glut.py +++ b/vispy/app/backends/_glut.py @@ -86,16 +86,29 @@ def _vispy_get_backend_name(self): return 'Glut' def _vispy_process_events(self): - pass # not possible? - + # Determine what function to use, if any + if hasattr(glut, 'glutMainLoopEvent') and bool(glut.glutMainLoopEvent): + func = glut.glutMainLoopEvent + elif hasattr(glut, 'glutCheckLoop') and bool(glut.glutCheckLoop): + func = glut.glutCheckLoop # Darwin + else: + self._vispy_process_events = lambda: None + raise RuntimeError('Your implementation of GLUT does not allow ' + + 'interactivity. Consider installing freeglut.') + # Set for future use, and call! 
+ self._vispy_process_events = func + func() + def _vispy_run(self): self._vispy_get_native_app() # Force exist return glut.glutMainLoop() def _vispy_quit(self): - global _VP_GLUT_ALL_WINDOWS - for win in _VP_GLUT_ALL_WINDOWS: - win._vispy_close() + if hasattr(glut, 'glutLeaveMainLoop') and bool(glut.glutLeaveMainLoop): + glut.glutLeaveMainLoop() + else: + for win in _VP_GLUT_ALL_WINDOWS: + win._vispy_close() def _vispy_get_native_app(self): # HiDPI support for retina display @@ -121,6 +134,12 @@ def _vispy_get_native_app(self): glut.GLUT_DOUBLE | glut.GLUT_STENCIL | glut.GLUT_DEPTH) + # Prevent exit when closing window + try: + glut.glutSetOption(glut.GLUT_ACTION_ON_WINDOW_CLOSE, + glut.GLUT_ACTION_CONTINUE_EXECUTION) + except Exception: + pass self._initialized = True return glut @@ -137,7 +156,8 @@ def __init__(self, name='glut window', *args, **kwargs): # Cache of modifiers so we can send modifiers along with mouse motion self._modifiers_cache = () - + self._closed = False # Keep track whether the widget is closed + # Note: this seems to cause the canvas to ignore calls to show() # about half of the time. # glut.glutHideWindow() # Start hidden, like the other backends @@ -221,6 +241,8 @@ def _vispy_update(self): def _vispy_close(self): # Force the window or widget to shut down + if self._closed: + return glut.glutDestroyWindow(self._id) def _vispy_get_size(self): @@ -243,8 +265,9 @@ def on_resize(self, w, h): def on_close(self): if self._vispy_canvas is None: return + self._closed = True self._vispy_canvas.events.close() - + def on_draw(self, dummy=None): if self._vispy_canvas is None: return diff --git a/vispy/app/backends/_pyglet.py b/vispy/app/backends/_pyglet.py --- a/vispy/app/backends/_pyglet.py +++ b/vispy/app/backends/_pyglet.py @@ -87,8 +87,12 @@ def _vispy_get_backend_name(self): return 'Pyglet' def _vispy_process_events(self): - # todo: note that this does not actually process paint events :( - return pyglet.app.platform_event_loop.step(0.0) + # pyglet.app.platform_event_loop.step(0.0) + pyglet.clock.tick() + for window in pyglet.app.windows: + window.switch_to() + window.dispatch_events() + window.dispatch_event('on_draw') def _vispy_run(self): return pyglet.app.run()
nosetests stalled processes I've been trying to update some packages on my machine and I end up breaking qt installation which is no more available. Now (I think this is the only change), the nosetests always stalls: ``` Failure: RuntimeError (the sip module implements API v11.0 but the PyQt4.QtCore module requires API v10.1) ... ERROR Test that all events seem to be emitted. ... ok Test that the keymap contains all keys supported by vispy. ... ok Test that all _vispy_x methods are there. ... ok Test that all events seem to be emitted. ... ok Test that the keymap contains all keys supported by vispy. ... ok Test that all _vispy_x methods are there. ... ok Test that all events seem to be emitted. ... ok Test that the keymap contains all keys supported by vispy. ... ok Test that all _vispy_x methods are there. ... ok Failure: SkipTest (Skipping test: __init__: Requires QT) ... SKIP: Skipping test: __init__: Requires QT Failure: SkipTest (Skipping test: __init__: Requires QT) ... SKIP: Skipping test: __init__: Requires QT Failure: SkipTest (Skipping test: __init__: Requires QT) ... SKIP: Skipping test: __init__: Requires QT Test that all events seem to be emitted. ... ok Test that the keymap contains all keys supported by vispy. ... ok Test that all _vispy_x methods are there. ... ok Embed Canvas via Qt Designer ... SKIP: Skipping test: test_qt_designer: Requires QT with UIC Test desktop GL backend for basic functionality. ... ``` At this point I have an opened vispy window (that I cannot close) and I have to issue a `ctrl-C` for the process to go on. Then I get `FAILED (SKIP=4, errors=1)`
I also have this. It's actually different from the error that's happening in Almar's PR, which has something to do with threading/timers, I think. This is more likely to do with Pyglet. Is Pyglet your backend? (`vispy.sys_info()` should say) I can confirm that if I change the order preference to put `pyglet` in first ahead of Qt on my system, I get the same hangup, but _not_ if Qt is first (default) or `glfw`. So I'm pretty sure it's a pyglet-related error. ``` File "/home/larsoner/custombuilds/vispy/vispy/gloo/gl/tests/test_basics.py", line 28, in test_basics_desktop _test_basics('desktop') File "/home/larsoner/custombuilds/vispy/vispy/gloo/gl/tests/test_basics.py", line 64, in _test_basics context.wait() File "/home/larsoner/custombuilds/vispy/vispy/app/_util.py", line 98, in wait self.app.process_events() File "/home/larsoner/custombuilds/vispy/vispy/app/application.py", line 68, in process_events return self._backend._vispy_process_events() File "/home/larsoner/custombuilds/vispy/vispy/app/backends/_pyglet.py", line 91, in _vispy_process_events return pyglet.app.platform_event_loop.step(0.0) File "/home/larsoner/.local/lib/python2.7/site-packages/pyglet/app/xlib.py", line 109, in step if device.poll(): File "/home/larsoner/.local/lib/python2.7/site-packages/pyglet/app/xlib.py", line 91, in poll def poll(self): KeyboardInterrupt ``` That's where the error is if I ctrl-C. Weird Here is the minimal code to reproduce the hang: ``` from vispy.app import app_opengl_context with app_opengl_context(backend='pyglet') as context: context.wait() ``` What is this wait function doing ? https://github.com/vispy/vispy/blob/master/vispy/app/_util.py#L92 Looks like the timer is never actually running. Does Pyglet need to be in the `app.run()` mode for the timer to work or something? I checked, and it's stuck in that `while` loop. But why do we start a timer in the first place if we don't use it ? It looks like it's to make sure the event loop is running. But I think only `Qt` really has a functional event loop unless `app.run()` is called... @rougier we should talk to @almarklein once he's back on Gitter about this, it'll maybe be easier to sort it out. Ok. As I was saying in the chat; both glut and pyglet are not very suited for interactive use. I have seen that running `app.process_events` for pyglet does not really process events at all. Till recently this has not been a problem for the tests, but apparently now it has. We now have this `requires_non_glut` decorator. Maybe we should create a `requires_interactive_backend` decorator instead, which is not glut or pyglet. But pyglet and glut can process events one by one so the problem may be elsewhere. pyglet seems to be able to, but does not. I don't know about glut.
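The change that landed for pyglet (the `_pyglet.py` hunk in this record's patch) replaces `platform_event_loop.step(0.0)` with a hand-rolled "process events once" step. Pulled out of the backend as a standalone sketch:

```python
import pyglet


def process_pyglet_events_once():
    """Pump pyglet once without entering app.run(), mirroring the patched
    _vispy_process_events."""
    pyglet.clock.tick()                   # fire any scheduled pyglet callbacks
    for window in pyglet.app.windows:     # all currently open pyglet windows
        window.switch_to()                # make the window's GL context current
        window.dispatch_events()          # deliver pending input/resize events
        window.dispatch_event('on_draw')  # force a paint, which step(0.0) never did
```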
2014-04-01T07:41:13
vispy/vispy
230
vispy__vispy-230
[ "228" ]
15b3a1f29db193f4277965e42af4950f43cc26fc
diff --git a/vispy/app/backends/_qt.py b/vispy/app/backends/_qt.py --- a/vispy/app/backends/_qt.py +++ b/vispy/app/backends/_qt.py @@ -208,9 +208,6 @@ def closeEvent(self, ev): if self._vispy_canvas is None: return self._vispy_canvas.events.close() - # Destroy if this is a toplevel widget - if self.parent() is None: - self.destroy() def mousePressEvent(self, ev): if self._vispy_canvas is None: @@ -300,6 +297,12 @@ def _modifiers(self, event): mod += keys.META, return mod + def __del__(self): + # Destroy if this is a toplevel widget + if self.parent() is None: + self.destroy() + QtOpenGL.QGLWidget.__del__(self) + # class QtMouseEvent(MouseEvent): # special subclass of MouseEvent for propagating acceptance info back to Qt.
The Python process hangs after demos on Windows, Qt backend If I run any demo, it works, then I close the window, and the process is still running. CTRL+C does not end the process, I have to kill it manually, which is kind of annoying.
Same on Linux. Same on OSX for some demos (not all). Seems to have been introduced in 21df29e3629a18 I wonder why this would make it hang. Any idea @lcampagn? The window was destroyed before it had a chance to communicate its demise to the QApplication?
2014-04-17T16:52:50
vispy/vispy
245
vispy__vispy-245
[ "244" ]
255573ad52229883becf6b5b0d77e0383c6773a3
diff --git a/examples/glsl-sandbox-cube.py b/examples/glsl-sandbox-cube.py --- a/examples/glsl-sandbox-cube.py +++ b/examples/glsl-sandbox-cube.py @@ -79,7 +79,7 @@ def __init__(self, **kwargs): def on_initialize(self, event): gloo.set_clear_color((1, 1, 1, 1)) - gloo.set_state(depth=True) + gloo.set_state(depth_test=True) def on_resize(self, event): width, height = event.size
glsl-sandbox-cube GL_DEPTH issue (Linux Python 2.7.6) I get the following issue when running glsl-sandbox-cube; setting `GL_DEPTH` doesn't seem to work. ``` Traceback (most recent call last): File "glsl-sandbox-cube.py", line 82, in on_initialize gloo.set_state(depth=True) File "/usr/local/lib/python2.7/dist-packages/vispy-0.2.1-py2.7.egg/vispy/gloo/wrappers.py", line 531, in set_state func(_gl_attr(key)) File "/usr/local/lib/python2.7/dist-packages/vispy-0.2.1-py2.7.egg/vispy/gloo/wrappers.py", line 43, in _gl_attr % (x, y)) ValueError: gl has no attribute corresponding to name depth (GL_DEPTH) ``` However when I check `PyOpenGL`: ``` import OpenGL.GL as gl print gl.GL_DEPTH >> GL_DEPTH (6145) ```
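The one-line fix in the patch reflects how `gloo.set_state` turns each keyword into a GL capability name: `depth_test` maps to GL_DEPTH_TEST, while there is no GL_DEPTH capability for the gl wrapper to enable, hence the ValueError. A short hedged usage sketch:

```python
from vispy import gloo

def on_initialize(event):
    gloo.set_clear_color((1, 1, 1, 1))
    # The keyword is upper-cased to a GL capability, so it must be spelled
    # depth_test (-> GL_DEPTH_TEST), not depth (-> GL_DEPTH, which is rejected).
    gloo.set_state(depth_test=True)
```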
2014-05-07T14:19:28
vispy/vispy
254
vispy__vispy-254
[ "252" ]
2ecd55170ebab46b90e8f04de8b11e63e15c267a
diff --git a/vispy/gloo/variable.py b/vispy/gloo/variable.py --- a/vispy/gloo/variable.py +++ b/vispy/gloo/variable.py @@ -324,6 +324,11 @@ def set_data(self, data): def _activate(self): if isinstance(self.data, VertexBuffer): self.data.activate() + size, gtype, dtype = gl_typeinfo[self._gtype] + stride = self.data.stride + gl.glEnableVertexAttribArray(self.handle) + gl.glVertexAttribPointer(self.handle, size, gtype, gl.GL_FALSE, + stride, self.data.offset) def _deactivate(self): if isinstance(self.data, VertexBuffer):
Possible bug with multiple programs? I got a weird behavior that looks like a bug, unless I'm doing something wrong when painting two programs. The following example should, in principle, display two squares: one blue on the bottom left, one red on the top right. Only the last draw call seems to be taken into account. Tested on Windows 8.1 Python 2.7 64-bit with AMD graphics card. ``` python from vispy import gloo from vispy import app import numpy as np position1 = np.zeros((1, 2)).astype(np.float32)-.5 position2 = np.zeros((1, 2)).astype(np.float32)+.5 VERT_SHADER = """ attribute vec2 a_position; void main() { gl_Position = vec4(a_position, 0.0, 1.0); gl_PointSize = 30.0; } """ FRAG_SHADER1 = """ void main() { gl_FragColor = vec4(0.0, 0.0, 1.0, 1.0); } """ FRAG_SHADER2 = """ void main() { gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0); } """ class Canvas(app.Canvas): def __init__(self): app.Canvas.__init__(self, close_keys='escape') self.program1 = gloo.Program(VERT_SHADER, FRAG_SHADER1) # blue on the left self.program1['a_position'] = position1 self.program2 = gloo.Program(VERT_SHADER, FRAG_SHADER2) # red on the right self.program2['a_position'] = position2 def on_resize(self, event): width, height = event.size gloo.set_viewport(0, 0, width, height) def on_paint(self, event): gloo.clear(color=(0.0, 0.0, 0.0, 1.0)) self.program1.draw('points') self.program2.draw('points') c = Canvas() c.show() app.run() ```
Confirmed -- I also only get the second (red) square. If I comment out the second draw, the first (blue) square shows up. @almarklein @rougier any idea? Try: ``` def _activate(self): if isinstance(self.data, VertexBuffer): self.data.activate() size, gtype, dtype = gl_typeinfo[self._gtype] stride = self.data.stride gl.glEnableVertexAttribArray(self.handle) gl.glVertexAttribPointer(self.handle, size, gtype, gl.GL_FALSE, stride, ctypes.c_void_p(0)) ``` in vispy/gloo/variable.py in Attribute._activate()
2014-05-13T17:21:42
vispy/vispy
261
vispy__vispy-261
[ "260" ]
a7fd4a304eb034ccaed4aaa7a4160d4c82f99a94
diff --git a/make/make.py b/make/make.py --- a/make/make.py +++ b/make/make.py @@ -147,7 +147,18 @@ def website(self, arg): sys.exit('Command "website" does not have subcommand "%s"' % arg) def test(self, arg): - """ Run all tests. """ + """ Run tests: + * full - run all tests + * nose - run nose tests (also for each backend) + * any backend name (e.g. pyside, pyqt4, glut, sdl2, etc.) - + run tests for the given backend + * nobackend - run tests that do not require a backend + * extra - run extra tests (line endings and style) + * lineendings - test line ending consistency + * flake - flake style testing (PEP8 and more) + """ + if not arg: + return self.help('test') from vispy import test try: test(*(arg.split())) diff --git a/vispy/__init__.py b/vispy/__init__.py --- a/vispy/__init__.py +++ b/vispy/__init__.py @@ -33,6 +33,23 @@ from .util import (dataio, _parse_command_line_arguments, config, # noqa set_log_level, keys, sys_info) # noqa -from .testing import _tester as test # noqa _parse_command_line_arguments() + + +# Define test proxy function, so we don't have to import vispy.testing now +def test(label='full', coverage=False, verbosity=1): + """Test vispy software + + Parameters + ---------- + label : str + Can be one of 'full', 'nose', 'nobackend', 'extra', 'lineendings', + 'flake', or any backend name (e.g., 'qt'). + coverage : bool + Produce coverage outputs (.coverage file and printing). + verbosity : int + Verbosity level to use when running ``nose``. + """ + from .testing import _tester + return _tester(label, coverage, verbosity)
diff --git a/vispy/testing/_runners.py b/vispy/testing/_runners.py --- a/vispy/testing/_runners.py +++ b/vispy/testing/_runners.py @@ -132,17 +132,7 @@ def _check_line_endings(): def _tester(label='full', coverage=False, verbosity=1): - """Test vispy software - - Parameters - ---------- - label : str - Can be one of 'full', 'nose', 'nobackend', 'extra', 'lineendings', - 'flake', or any backend name (e.g., 'qt'). - coverage : bool - Produce coverage outputs (.coverage file and printing). - verbosity : int - Verbosity level to use when running ``nose``. + """Test vispy software. See vispy.test() """ from vispy.app.backends import BACKEND_NAMES as backend_names label = label.lower() diff --git a/vispy/util/tests/test_import.py b/vispy/util/tests/test_import.py --- a/vispy/util/tests/test_import.py +++ b/vispy/util/tests/test_import.py @@ -14,7 +14,7 @@ # minimum that will be imported when importing vispy -_min_modules = ['vispy', 'vispy.util', 'vispy.testing', 'vispy.ext'] +_min_modules = ['vispy', 'vispy.util', 'vispy.ext'] def check_output(*popenargs, **kwargs):
test functionality always gets imported I'd say that `vispy.testing` only needs to be imported when testing. It should not be dragged in by default IMO.
The nice thing about having it in by default is that one can do vispy.test(), just like in numpy, scipy, scikit-learn (I think), etc. So I am -1 on removing it, unless it seems like it's slowing down the package, or might in the long run...? I am also a bit wary of unnecessary side effects. I don't think we have that right now, but in earlier versions of the testing module we actually imported all backends. What about: ``` def test(*bla): from vispy.testing import test as test_ # noqa return test_(*bla) ``` +1 for nested import, this is a good use case for it +1
2014-05-26T09:52:49
vispy/vispy
302
vispy__vispy-302
[ "299" ]
f3b52e890c2b60b776c7f148da6eee7bce029520
diff --git a/vispy/gloo/program.py b/vispy/gloo/program.py --- a/vispy/gloo/program.py +++ b/vispy/gloo/program.py @@ -307,7 +307,10 @@ def _activate(self): logger.debug("GPU: Activating program") gl.glUseProgram(self.handle) + + self._activate_variables() + def _activate_variables(self): for uniform in self._uniforms.values(): if uniform.active: uniform.activate() diff --git a/vispy/scene/shaders/composite.py b/vispy/scene/shaders/composite.py --- a/vispy/scene/shaders/composite.py +++ b/vispy/scene/shaders/composite.py @@ -120,6 +120,9 @@ def __init__(self, vmain, fmain): # hook definitions self._hook_defs = {} # {'hook_name': Function} + + # Cache state of Variables so we know which ones require update + self._variable_state = {} self._find_hooks() @@ -267,12 +270,15 @@ def _build(self): self.vshader = vs self.fshader = fs - # set all variables.. - self._apply_variables() - # and continue. super(ModularProgram, self)._build() + def _activate_variables(self): + # set all variables + self._apply_variables() + + super(ModularProgram, self)._activate_variables() + def _find_hooks(self): # Locate all undefined function prototypes in both shaders vprots = parsing.find_prototypes(self.vmain) @@ -463,4 +469,7 @@ def _apply_variables(self): if isinstance(spec, Function) or spec.vtype == 'varying': continue logger.debug(" %s = %s" % (name, spec.value)) - self[name] = spec.value + state_id = spec.state_id + if self._variable_state.get(name, None) != state_id: + self[name] = spec.value + self._variable_state[name] = state_id diff --git a/vispy/scene/shaders/function.py b/vispy/scene/shaders/function.py --- a/vispy/scene/shaders/function.py +++ b/vispy/scene/shaders/function.py @@ -68,10 +68,11 @@ class Variable(ShaderObject): Created by Function.__getitem__ """ def __init__(self, function, name, spec=None, anonymous=False): + self._state_counter = 0 super(Variable, self).__init__() self.function = function self._name = name # local name within the function - self.spec = None # (vtype, dtype, value) + self._spec = None # (vtype, dtype, value) if spec is not None: self.spec = spec self.anonymous = anonymous # if True, variable may be renamed. @@ -116,6 +117,22 @@ def value(self): "determine value." % self) return self.spec[2] + @property + def spec(self): + return self._spec + + @spec.setter + def spec(self, s): + self._spec = s + self._state_counter += 1 + + @property + def state_id(self): + """Return a unique ID that changes whenever the state of the Variable + has changed. This allows ModularProgram to quickly determine whether + the value has changed since it was last used.""" + return id(self), self._state_counter + def __repr__(self): return ("<Variable '%s' on %s (%d)>" % (self.name, self.function.name, id(self)))
Bug with ModularProgram: variables not updated In [this example](https://github.com/vispy/experimental/blob/master/cr_plot/bug.py), using the mouse wheel should allow you to zoom in or out. It doesn't work, but it does if the program is reconstructed at every draw (which can be done by setting `_dirty` to `True` all the time for instance). This example requires the `scenegraph` branch of Vispy.
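A standalone sketch of the change-detection scheme the fix introduces: each variable carries a counter that is bumped on every assignment, and the program re-applies only values whose `(id, counter)` pair changed since the last draw. The class names below are illustrative stand-ins, not the real vispy classes.

```python
class Variable(object):
    """Stand-in for a shader variable that remembers when it was changed."""
    def __init__(self, name, value=None):
        self.name = name
        self._value = value
        self._counter = 0

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, v):
        self._value = v
        self._counter += 1  # any assignment bumps the state counter

    @property
    def state_id(self):
        # Unique per object and per assignment.
        return (id(self), self._counter)


class ProgramLike(object):
    """Stand-in for the program: re-upload only variables that changed."""
    def __init__(self):
        self._variable_state = {}
        self.uploaded = {}  # plays the role of the GL uniform upload

    def apply_variables(self, variables):
        for var in variables:
            if self._variable_state.get(var.name) != var.state_id:
                self.uploaded[var.name] = var.value
                self._variable_state[var.name] = var.state_id


zoom = Variable('u_zoom', 1.0)
prog = ProgramLike()
prog.apply_variables([zoom])   # first draw: 1.0 is uploaded
zoom.value = 2.0               # mouse wheel changes the zoom factor
prog.apply_variables([zoom])   # the change is detected and 2.0 is uploaded
assert prog.uploaded['u_zoom'] == 2.0
```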
2014-07-06T13:28:26
vispy/vispy
304
vispy__vispy-304
[ "250" ]
b3d1cbfd1b5e97d0b629bf802ca4aabfca0fd06d
diff --git a/vispy/app/base.py b/vispy/app/base.py --- a/vispy/app/base.py +++ b/vispy/app/base.py @@ -173,6 +173,8 @@ def _vispy_mouse_move(self, **kwds): last_event = self._vispy_mouse_data['last_event'] if last_event is not None: last_event._forget_last_event() + else: + kwds['button'] = self._vispy_mouse_data['press_event'].button ev = self._vispy_canvas.events.mouse_move(**kwds) self._vispy_mouse_data['last_event'] = ev diff --git a/vispy/app/canvas.py b/vispy/app/canvas.py --- a/vispy/app/canvas.py +++ b/vispy/app/canvas.py @@ -394,9 +394,11 @@ class MouseEvent(Event): String indicating the event type (e.g. mouse_press, key_release) pos : (int, int) The position of the mouse (in screen coordinates). - button : int + button : int | None The button that generated this event (can be None). - Left=1, right=2, middle=3. + Left=1, right=2, middle=3. During a mouse drag, this + will return the button that started the drag (same thing as + ``event.press_event.button``). buttons : [int, ...] The list of buttons depressed during this event. modifiers : tuple of Key instances @@ -424,7 +426,7 @@ def __init__(self, type, pos=None, button=None, buttons=None, **kwds): Event.__init__(self, type, **kwds) self._pos = (0, 0) if (pos is None) else (pos[0], pos[1]) - self._button = int(button) if (button is not None) else 0 + self._button = int(button) if (button is not None) else None self._buttons = [] if (buttons is None) else buttons self._modifiers = tuple(modifiers or ()) self._delta = (0.0, 0.0) if (delta is None) else (delta[0], delta[1])
While dragging, event.button is 0 instead of the actual press button. One has to do `event.press_event.button` instead.
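A short, hypothetical handler showing both the workaround and the behaviour documented by the patch above (during a drag, `event.button` reports the button that started the drag, i.e. the same value as `event.press_event.button`).

```python
def on_mouse_move(event):
    if event.is_dragging:
        # Workaround that always works: ask the press event directly.
        button = event.press_event.button
        # With the fix applied, this is equivalent during a drag:
        # button = event.button
        print('dragging with button', button)
```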
2014-07-06T15:58:50
vispy/vispy
305
vispy__vispy-305
[ "248" ]
b3d1cbfd1b5e97d0b629bf802ca4aabfca0fd06d
diff --git a/vispy/app/timer.py b/vispy/app/timer.py --- a/vispy/app/timer.py +++ b/vispy/app/timer.py @@ -103,7 +103,7 @@ def start(self, interval=None, iterations=None): self.max_iterations = iterations self._backend._vispy_start(self.interval) self._running = True - self._last_emit_time = None + self._last_emit_time = precision_time() self.events.start(type='timer_start') def stop(self): @@ -139,10 +139,7 @@ def _timeout(self, *args): # compute dt since last event now = precision_time() - if self._last_emit_time is None: - dt = None - else: - dt = now - self._last_emit_time + dt = now - self._last_emit_time self._last_emit_time = now self.events.timeout(
The first emitted Timer event has `None` as `dt` property ``` python def on_timer(self, event): print event.dt ``` displays `None` the first time, and the correct dt then (a float). The first dt should probably be `0.0`.
I actually think `None` makes more sense. If not `None`, then probably `np.inf` (or maybe even `np.nan`). If the idea is that `event.dt` gives you the time between the previous event and the current one, then it's undefined for the first event. However, I guess doing `np.inf` would functionally make the most sense since comparisons like `if dt > 1 / 60.:` would work. Is that what you want it for? This is tough! `dt` is indeed undefined, so I lean towards None also. `inf` or `0.0` may keep calculations working (no `if dt is not None` required), but I guess dt will often be used in division, which would result in ZeroDivisionError. `inf` may also result in nasty behavior (we had better not have someone do `time.sleep(dt)`). Another option is to set the start point when the user calls `Timer.start()`. That way the first dt measures the time since the timer started, which should be roughly the same interval as all subsequent timeouts. > Another option is to set the start point when the user calls Timer.start(). That way the first dt measures the time since the timer started, which should be roughly the same interval as all subsequent timeouts. +1 Doh! +1
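A standalone sketch of the option the thread converges on (and that the patch above implements): record the reference time in `start()`, so the first `dt` measures the time since the timer was started instead of being undefined. `default_timer` stands in for vispy's `precision_time`.

```python
from timeit import default_timer


class MiniTimer(object):
    def __init__(self):
        self._last_emit_time = None

    def start(self):
        # Setting the reference point here makes the first dt well defined.
        self._last_emit_time = default_timer()

    def _timeout(self):
        now = default_timer()
        dt = now - self._last_emit_time
        self._last_emit_time = now
        return dt
```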
2014-07-06T22:16:03
vispy/vispy
324
vispy__vispy-324
[ "323" ]
c90af29d86de9e1133820843eb305681f26e49df
diff --git a/examples/visuals/mesh_visual.py b/examples/visuals/mesh_visual.py --- a/examples/visuals/mesh_visual.py +++ b/examples/visuals/mesh_visual.py @@ -92,9 +92,9 @@ def __init__(self): for i, mesh in enumerate(self.meshes): x = 2.0 * (i % grid[0]) / grid[0] + 4.0 / grid[0] - 2 y = - 2.0 * (i // grid[1]) / grid[1] - 4.0 / grid[1] + 2 - mesh.transform = ChainTransform([self.rotation, - STTransform(translate=(x, y), - scale=(s, s, s))]) + mesh.transform = ChainTransform([STTransform(translate=(x, y), + scale=(s, s, s)), + self.rotation]) vispy.app.Canvas.__init__(self, close_keys='escape') self.size = (800, 800) diff --git a/vispy/scene/visuals/modular_visual.py b/vispy/scene/visuals/modular_visual.py --- a/vispy/scene/visuals/modular_visual.py +++ b/vispy/scene/visuals/modular_visual.py @@ -9,7 +9,7 @@ from ... import gloo from .visual import Visual from ..shaders import ModularProgram -from ..transforms import ChainTransform +#from ..transforms import ChainTransform from ..components import (VisualComponent, XYPosComponent, XYZPosComponent, UniformColorComponent, VertexColorComponent) @@ -358,7 +358,7 @@ def _activate_transform(self, event=None): # TODO: this must be optimized. # Allow using as plain visual or in a scenegraph t = self.transform if (event is None) else event.render_transform - if isinstance(t, ChainTransform): - t.simplify() # Reduce number of transforms + #if isinstance(t, ChainTransform): + # t.simplify() # Reduce number of transforms #self._program['map_local_to_nd'] = self.transform.shader_map() self._program['map_local_to_nd'] = t.shader_map()
BUG: Mesh example doesn't rotate I assume it's related to the `gloo` tweaks that were done recently, but `examples/visuals/mesh_visual.py` doesn't rotate. @lcampagn any idea?
I confirmed the `rotate` function is being called by adding a print statement, so it must be something else. Yeah, at some point the system for simplifying transforms was changed, and this broke the connection between modifying the transform and updating the final program. Haven't gotten around to fixing it because we are currently working through an overhaul of the shader / program system anyway. Cool, just wanted to make sure we were aware of it. It looks like the code used to have a TODO/Hack to get it to update by setting the `_program._need_build` property, looks like that's no longer a workaround.
2014-07-15T23:23:55
vispy/vispy
334
vispy__vispy-334
[ "137" ]
e54ab28c5b712714e43e10c4bde87e211f8efc19
diff --git a/examples/demo/brain.py b/examples/demo/brain.py new file mode 100644 --- /dev/null +++ b/examples/demo/brain.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# vispy: gallery 2 +# Copyright (c) 2014, Vispy Development Team. +# Distributed under the (new) BSD License. See LICENSE.txt for more info. + +""" +3D brain mesh viewer. +""" + +from timeit import default_timer +import numpy as np + +from vispy import gloo +from vispy import app +from vispy.util.transforms import perspective, translate, rotate +from vispy.util import get_data_file + +brain = np.load(get_data_file('brain/brain.npz')) +data = brain['vertex_buffer'] +faces = brain['index_buffer'] + +VERT_SHADER = """ +#version 120 +uniform mat4 u_model; +uniform mat4 u_view; +uniform mat4 u_projection; +uniform vec4 u_color; + +attribute vec3 a_position; +attribute vec3 a_normal; +attribute vec4 a_color; + +varying vec3 v_position; +varying vec3 v_normal; +varying vec4 v_color; + +void main() +{ + v_normal = a_normal; + v_position = a_position; + v_color = a_color * u_color; + gl_Position = u_projection * u_view * u_model * vec4(a_position,1.0); +} +""" + +FRAG_SHADER = """ +#version 120 +uniform mat4 u_model; +uniform mat4 u_view; +uniform mat4 u_normal; + +uniform vec3 u_light_intensity; +uniform vec3 u_light_position; + +varying vec3 v_position; +varying vec3 v_normal; +varying vec4 v_color; + +void main() +{ + // Calculate normal in world coordinates + vec3 normal = normalize(u_normal * vec4(v_normal,1.0)).xyz; + + // Calculate the location of this fragment (pixel) in world coordinates + vec3 position = vec3(u_view*u_model * vec4(v_position, 1)); + + // Calculate the vector from this pixels surface to the light source + vec3 surfaceToLight = u_light_position - position; + + // Calculate the cosine of the angle of incidence (brightness) + float brightness = dot(normal, surfaceToLight) / + (length(surfaceToLight) * length(normal)); + brightness = max(min(brightness,1.0),0.0); + + // Calculate final color of the pixel, based on: + // 1. The angle of incidence: brightness + // 2. The color/intensities of the light: light.intensities + // 3. The texture and texture coord: texture(tex, fragTexCoord) + + // Specular lighting. + vec3 surfaceToCamera = vec3(0.0, 0.0, 1.0) - position; + vec3 K = normalize(normalize(surfaceToLight) + normalize(surfaceToCamera)); + float specular = clamp(pow(abs(dot(normal, K)), 40.), 0.0, 1.0); + + gl_FragColor = v_color * brightness * vec4(u_light_intensity, 1); +} +""" + + +class Canvas(app.Canvas): + def __init__(self): + app.Canvas.__init__(self, close_keys='escape') + self.size = 800, 600 + + self.program = gloo.Program(VERT_SHADER, FRAG_SHADER) + + self.theta, self.phi = -80, 180 + self.translate = 3 + + self.faces = gloo.IndexBuffer(faces) + self.program.bind(gloo.VertexBuffer(data)) + + self.program['u_color'] = 1, 1, 1, 1 + self.program['u_light_position'] = (1., 1., 1.) + self.program['u_light_intensity'] = (1., 1., 1.) + + self._t0 = default_timer() + self._timer = app.Timer(1. 
/ 60) + self._timer.connect(self.on_timer) + self._timer.start() + + self.update_matrices() + + def update_matrices(self): + self.view = np.eye(4, dtype=np.float32) + self.model = np.eye(4, dtype=np.float32) + self.projection = np.eye(4, dtype=np.float32) + + rotate(self.model, self.theta, 1, 0, 0) + rotate(self.model, self.phi, 0, 1, 0) + + translate(self.view, 0, 0, -self.translate) + + self.program['u_model'] = self.model + self.program['u_view'] = self.view + self.program['u_normal'] = np.array(np.matrix(np.dot(self.view, + self.model)).I.T) + + def on_initialize(self, event): + gloo.set_state(blend=False, depth_test=True, polygon_offset_fill=True) + + def on_timer(self, event): + elapsed = default_timer() - self._t0 + self.phi = 180 + elapsed * 50. + self.update_matrices() + self.update() + + def on_resize(self, event): + width, height = event.size + gloo.set_viewport(0, 0, width, height) + self.projection = perspective(45.0, width / float(height), 1.0, 20.0) + self.program['u_projection'] = self.projection + + def on_mouse_wheel(self, event): + self.translate += -event.delta[1]/5. + self.translate = max(2, self.translate) + self.update_matrices() + self.update() + + def on_draw(self, event): + gloo.clear() + self.program.draw('triangles', indices=self.faces) + +if __name__ == '__main__': + c = Canvas() + c.show() + app.run() diff --git a/examples/demo/signals.py b/examples/demo/signals.py new file mode 100644 --- /dev/null +++ b/examples/demo/signals.py @@ -0,0 +1,130 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# vispy: gallery 2 +# Copyright (c) 2014, Vispy Development Team. +# Distributed under the (new) BSD License. See LICENSE.txt for more info. + +""" +Multiple digital signals. +""" + +from vispy import gloo +from vispy import app +import numpy as np +import math + +m = 20 +n = 25000 +x = np.tile(np.linspace(-1., 1., n), m) +y = .1 * np.random.randn(m, n) +y += np.arange(m).reshape((-1, 1)) + +data = np.zeros(n*m, dtype=[ + ('a_position', np.float32, 2), + ('a_color', np.float32, 3), + ('a_index', np.float32, 1), +]) + +data['a_position'] = np.zeros((n*m, 2), dtype=np.float32) +data['a_position'][:, 0] = x +data['a_position'][:, 1] = .9*(y.ravel()/y.max()*2-1) + +data['a_color'] = np.repeat(np.random.uniform(size=(m, 3), low=.5, high=.9), + n, axis=0) + +data['a_index'] = np.repeat(np.arange(m), n) + +VERT_SHADER = """ +#version 120 +attribute vec2 a_position; +attribute float a_index; +varying float v_index; + +attribute vec3 a_color; +varying vec3 v_color; + +uniform vec2 u_pan; +uniform vec2 u_scale; + +void main() { + + vec2 position_tr = u_scale * (a_position + u_pan); + gl_Position = vec4(position_tr, 0.0, 1.0); + v_color = a_color; + v_index = a_index; +} +""" + +FRAG_SHADER = """ +#version 120 +varying vec3 v_color; +varying float v_index; +void main() { + gl_FragColor = vec4(v_color, 1.0); + if ((fract(v_index) > .00001) && (fract(v_index) < .99999)) + gl_FragColor.a = 0.; +} +""" + + +class Canvas(app.Canvas): + def __init__(self): + app.Canvas.__init__(self, close_keys='escape') + self.program = gloo.Program(VERT_SHADER, FRAG_SHADER) + self.program.bind(gloo.VertexBuffer(data)) + + self.program['u_pan'] = (0., 0.) + self.program['u_scale'] = (1., 1.) 
+ + def on_initialize(self, event): + gloo.set_state(clear_color=(1, 1, 1, 1), blend=True, + blend_func=('src_alpha', 'one_minus_src_alpha')) + + def on_resize(self, event): + self.width, self.height = event.size + gloo.set_viewport(0, 0, self.width, self.height) + + def on_draw(self, event): + gloo.clear(color=(0.0, 0.0, 0.0, 1.0)) + self.program.draw('line_strip') + + def _normalize(self, x_y): + x, y = x_y + w, h = float(self.width), float(self.height) + return x/(w/2.)-1., y/(h/2.)-1. + + def on_mouse_move(self, event): + if event.is_dragging: + x0, y0 = self._normalize(event.press_event.pos) + x1, y1 = self._normalize(event.last_event.pos) + x, y = self._normalize(event.pos) + dx, dy = x - x1, -(y - y1) + button = event.press_event.button + + pan_x, pan_y = self.program['u_pan'] + scale_x, scale_y = self.program['u_scale'] + + if button == 1: + self.program['u_pan'] = (pan_x+dx/scale_x, pan_y+dy/scale_y) + elif button == 2: + scale_x_new, scale_y_new = (scale_x * math.exp(2.5*dx), + scale_y * math.exp(2.5*dy)) + self.program['u_scale'] = (scale_x_new, scale_y_new) + self.program['u_pan'] = (pan_x - + x0 * (1./scale_x - 1./scale_x_new), + pan_y + + y0 * (1./scale_y - 1./scale_y_new)) + self.update() + + def on_mouse_wheel(self, event): + dx = np.sign(event.delta[1])*.05 + scale_x, scale_y = self.program['u_scale'] + scale_x_new, scale_y_new = (scale_x * math.exp(2.5*dx), + scale_y * math.exp(2.5*dx)) + self.program['u_scale'] = (scale_x_new, scale_y_new) + self.update() + +if __name__ == '__main__': + c = Canvas() + c.show() + app.run()
New example: simple scientific interactive plot with vispy.gloo This is an example illustrating the scientific plotting capabilities of vispy, using only vispy.gloo. Of course, this example will be useless in the future once we have high-level plotting interfaces. It is just illustrative to have that in the meantime. We choose a mathematical function `x --> f(x)` and display it using a VBO. At first, we could simply use `GL_LINE_STRIP`, but we could also use antialiased lines. Then, we can implement interactivity (panning and zooming) on the GPU (vertex shader).
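The core of the GPU-side pan/zoom described here, extracted as a simplified sketch from the `signals.py` example added by the patch above: the vertex data stays on the GPU, and interaction only updates two uniforms that the vertex shader applies to every sample.

```python
# Simplified vertex shader: a_position holds the (x, f(x)) samples once,
# u_pan / u_scale are the only things the CPU touches while interacting.
VERT_SHADER = """
attribute vec2 a_position;
uniform vec2 u_pan;
uniform vec2 u_scale;
void main() {
    vec2 position_tr = u_scale * (a_position + u_pan);
    gl_Position = vec4(position_tr, 0.0, 1.0);
}
"""

# Panning/zooming then reduces to uniform updates, e.g. in a mouse handler:
#   program['u_pan'] = (pan_x + dx / scale_x, pan_y + dy / scale_y)
#   program['u_scale'] = (scale_x * math.exp(2.5 * dx),
#                         scale_y * math.exp(2.5 * dy))
```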
2014-07-24T18:33:37
vispy/vispy
335
vispy__vispy-335
[ "247" ]
a2a6ba11f4c518da16c3ddb9d50deaafe0f29280
diff --git a/vispy/app/timer.py b/vispy/app/timer.py --- a/vispy/app/timer.py +++ b/vispy/app/timer.py @@ -56,6 +56,7 @@ def __init__(self, interval=0.0, connect=None, iterations=-1, start=False, self._interval = interval self._running = False + self._first_emit_time = None self._last_emit_time = None self.iter_count = 0 self.max_iterations = iterations @@ -81,6 +82,10 @@ def interval(self, val): self.stop() self.start() + @property + def elapsed(self): + return precision_time() - self._first_emit_time + @property def running(self): return self._running @@ -103,6 +108,7 @@ def start(self, interval=None, iterations=None): self.max_iterations = iterations self._backend._vispy_start(self.interval) self._running = True + self._first_emit_time = precision_time() self._last_emit_time = precision_time() self.events.start(type='timer_start') @@ -140,11 +146,13 @@ def _timeout(self, *args): # compute dt since last event now = precision_time() dt = now - self._last_emit_time + elapsed = now - self._first_emit_time self._last_emit_time = now self.events.timeout( type='timer_timeout', iteration=self.iter_count, + elapsed=elapsed, dt=dt) self.iter_count += 1
diff --git a/vispy/app/tests/test_app.py b/vispy/app/tests/test_app.py --- a/vispy/app/tests/test_app.py +++ b/vispy/app/tests/test_app.py @@ -305,6 +305,8 @@ def fake(event): timer.interval = 0.002 assert_equal(timer.interval, 0.002) assert_true(timer.running) + sleep(.003) + assert_true(timer.elapsed >= 0.002) timer.stop() assert_true(not timer.running) assert_true(timer.native)
Adding `elapsed` property to Timer event? `event.elapsed` would be a shortcut to ~~`event.dt * event.iteration`~~. Actually it's a bit more complicated because `event.dt` is not constant, so it should rather be the sum of all `event.dt`s.
Would it be better to just return `ptime.time() - timer.start_time` ? Note that this would mean that calling `timer.elapsed` twice within an iteration would return different results (which I think would be the expected behavior). Yes, probably, maybe using [default timer](https://docs.python.org/2/library/timeit.html#timeit.default_timer). I was not aware of default_timer. We should test that on all platforms and see whether it can be used to replace `util.ptime`. Oh, I didn't see there was a custom function here. I think it would be better to use default_timer (I usually use this function), but it would require some testing to be completely sure it's working fine. I'm going to open a new issue. > Yes, probably, maybe using default timer. Or `time.perf_counter` on py33+ https://docs.python.org/dev/library/time.html#time.perf_counter
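A minimal sketch of the behaviour agreed on above and implemented in the patch: `elapsed` is recomputed from the clock on every access, so two reads within one iteration can return different values. `default_timer` stands in for vispy's `precision_time`.

```python
from timeit import default_timer


class ElapsedTimer(object):
    def start(self):
        self._first_emit_time = default_timer()

    @property
    def elapsed(self):
        # A fresh reading every time the property is accessed.
        return default_timer() - self._first_emit_time
```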
2014-07-24T19:56:13
vispy/vispy
340
vispy__vispy-340
[ "135" ]
2967d5dfa7ce7748298a35ea675a2d54308fc917
diff --git a/examples/demo/realtime_signals.py b/examples/demo/realtime_signals.py new file mode 100644 --- /dev/null +++ b/examples/demo/realtime_signals.py @@ -0,0 +1,166 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# vispy: gallery 2 +# Copyright (c) 2014, Vispy Development Team. +# Distributed under the (new) BSD License. See LICENSE.txt for more info. + +""" +Multiple real-time digital signals with GLSL-based clipping. +""" + +from vispy import gloo +from vispy import app +import numpy as np +import math + +# Number of cols and rows in the table. +nrows = 16 +ncols = 20 + +# Number of signals. +m = nrows*ncols + +# Number of samples per signal. +n = 1000 + +# Various signal amplitudes. +amplitudes = .1 + .2 * np.random.rand(m, 1).astype(np.float32) + +# Generate the signals as a (m, n) array. +y = amplitudes * np.random.randn(m, n).astype(np.float32) + +# Color of each vertex (TODO: make it more efficient by using a GLSL-based +# color map and the index). +color = np.repeat(np.random.uniform(size=(m, 3), low=.5, high=.9), + n, axis=0).astype(np.float32) + +# Signal 2D index of each vertex (row and col) and x-index (sample index +# within each signal). +index = np.c_[np.repeat(np.repeat(np.arange(ncols), nrows), n), + np.repeat(np.tile(np.arange(nrows), ncols), n), + np.tile(np.arange(n), m)].astype(np.float32) + +VERT_SHADER = """ +#version 120 + +// y coordinate of the position. +attribute float a_position; + +// row, col, and time index. +attribute vec3 a_index; +varying vec3 v_index; + +// 2D scaling factor (zooming). +uniform vec2 u_scale; + +// Size of the table. +uniform vec2 u_size; + +// Number of samples per signal. +uniform float u_n; + +// Color. +attribute vec3 a_color; +varying vec4 v_color; + +// Varying variables used for clipping in the fragment shader. +varying vec2 v_position; +varying vec4 v_ab; + +void main() { + float nrows = u_size.x; + float ncols = u_size.y; + + // Compute the x coordinate from the time index. + float x = -1 + 2*a_index.z / (u_n-1); + vec2 position = vec2(x, a_position); + + // Find the affine transformation for the subplots. + vec2 a = vec2(1./ncols, 1./nrows)*.9; + vec2 b = vec2(-1 + 2*(a_index.x+.5) / ncols, + -1 + 2*(a_index.y+.5) / nrows); + // Apply the static subplot transformation + scaling. + gl_Position = vec4(a*u_scale*position+b, 0.0, 1.0); + + v_color = vec4(a_color, 1.); + v_index = a_index; + + // For clipping test in the fragment shader. + v_position = gl_Position.xy; + v_ab = vec4(a, b); +} +""" + +FRAG_SHADER = """ +#version 120 + +varying vec4 v_color; +varying vec3 v_index; + +varying vec2 v_position; +varying vec4 v_ab; + +void main() { + gl_FragColor = v_color; + + // Discard the fragments between the signals (emulate glMultiDrawArrays). + if ((fract(v_index.x) > 0.) || (fract(v_index.y) > 0.)) + discard; + + // Clipping test. + vec2 test = abs((v_position.xy-v_ab.za)/v_ab.xy); + if ((test.x > 1) || (test.y > 1)) + discard; +} +""" + + +class Canvas(app.Canvas): + def __init__(self): + app.Canvas.__init__(self, title='Use your wheel to zoom!', + close_keys='escape') + self.program = gloo.Program(VERT_SHADER, FRAG_SHADER) + self.program['a_position'] = y.ravel() + self.program['a_color'] = color + self.program['a_index'] = index + self.program['u_scale'] = (1., 1.) + self.program['u_size'] = (nrows, ncols) + self.program['u_n'] = n + + self.timer = app.Timer(1. 
/ 60) + self.timer.connect(self.on_timer) + self.timer.start() + + def on_initialize(self, event): + gloo.set_state(clear_color=(1, 1, 1, 1), blend=True, + blend_func=('src_alpha', 'one_minus_src_alpha')) + + def on_resize(self, event): + self.width, self.height = event.size + gloo.set_viewport(0, 0, self.width, self.height) + + def on_mouse_wheel(self, event): + dx = np.sign(event.delta[1]) * .05 + scale_x, scale_y = self.program['u_scale'] + scale_x_new, scale_y_new = (scale_x * math.exp(2.5*dx), + scale_y * math.exp(0.0*dx)) + self.program['u_scale'] = (max(1, scale_x_new), max(1, scale_y_new)) + self.update() + + def on_timer(self, event): + """Add some data at the end of each signal (real-time signals).""" + k = 10 + y[:, :-k] = y[:, k:] + y[:, -k:] = amplitudes * np.random.randn(m, k) + + self.program['a_position'].set_data(y.ravel().astype(np.float32)) + self.update() + + def on_draw(self, event): + gloo.clear(color=(0.0, 0.0, 0.0, 1.0)) + self.program.draw('line_strip') + +if __name__ == '__main__': + c = Canvas() + c.show() + app.run()
New example: data acquisition system Imagine a Data Acquisition System receiving real-time multi-channel digital signals. There are two ways to display the incoming data: - Scrolling: the window shows `[t-h:t]` where `t` is the current time, and `h` the size (duration) of the window. Old data is discarded, and new buffered data comes from the right side of the window. - Rotating: the window is split in two parts. The left part (new data) shows `[t-a*t,t]`, the right part (old data) shows `[t-b*t,t-a*t]`. In other words, the screen is constantly refreshed following a rotating vertical line (current time) that goes from left to right. It should be possible to toggle dynamically between the two. Technically, this could be made efficient by considering a VBO containing only the data samples (not the time samples). This VBO is updated in real-time with `set_subdata` on a circularly changing sub-window. There is another VBO with integer indices for all vertices (static). Besides, there are variables containing the current time, the window duration, and the current position (integer) within the window. The time samples should not be computed on the CPU and transferred to the GPU. Rather, they should be generated dynamically on the vertex shader using the vertex indices and the current time. This saves 2x memory transfer. Suggested by @samuelgarcia.
If you do that, I will pay each of you, and all your descendants for 3 generations, a bottle of wine. Promise. Something else to consider for this kind of "oscilloscope" is a per-channel gain and offset. I did that in the pyacq project. It allows the user to spread channels with a fake gain/offset, like a real oscilloscope. Good idea. This is also useful for purely offline visualization. (ping @nippoo) > If you do that, I will pay each of you, and all your descendants for 3 generations, a bottle of wine. That's a lot of wine :)
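A standalone sketch of the scrolling update the example ends up using (taken from the `on_timer` handler in the patch above): only the amplitude samples are shifted and re-uploaded, while the x/time coordinate is reconstructed on the GPU from a static per-vertex index.

```python
import numpy as np

m, n, k = 16, 1000, 10   # channels, samples per channel, new samples per tick
amplitudes = .1 + .2 * np.random.rand(m, 1).astype(np.float32)
y = amplitudes * np.random.randn(m, n).astype(np.float32)


def scroll(y, new_block):
    """Drop the oldest samples on the left, append the new block on the right."""
    k = new_block.shape[1]
    y[:, :-k] = y[:, k:]
    y[:, -k:] = new_block
    return y


y = scroll(y, amplitudes * np.random.randn(m, k).astype(np.float32))
# Each tick the flattened array is pushed to the GPU, e.g.
#   program['a_position'].set_data(y.ravel().astype(np.float32))
# and a static a_index attribute lets the vertex shader compute x itself,
# so the time samples never travel over the bus.
```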
2014-07-30T22:50:14
vispy/vispy
354
vispy__vispy-354
[ "350" ]
b4fe7d8d942e1ae0ce0a17e2313425405ea05d1b
diff --git a/vispy/util/_logging.py b/vispy/util/_logging.py --- a/vispy/util/_logging.py +++ b/vispy/util/_logging.py @@ -7,6 +7,7 @@ import inspect import re import traceback +from functools import partial from ..ext.six import string_types @@ -63,8 +64,7 @@ class _VispyStreamHandler(logging.StreamHandler): Prepending of traceback information is done in _VispyFormatter. """ def __init__(self): - #logging.StreamHandler.__init__(self, _WrapStdOut()) - logging.StreamHandler.__init__(self, sys.stdout) + logging.StreamHandler.__init__(self, sys.stderr) self._vispy_formatter = _lf self.setFormatter(self._vispy_formatter) self._vispy_match = None @@ -76,17 +76,13 @@ def _vispy_emit_match_andor_record(self, record): """Log message emitter that optionally matches and/or records""" test = record.getMessage() match = self._vispy_match - if (match is None or re.search(match, test) or + if (match is None or re.search(match, test) or re.search(match, _get_vispy_caller())): if self._vispy_emit_record: fmt_rec = self._vispy_formatter.format(record) self._vispy_emit_list.append(fmt_rec) return logging.StreamHandler.emit(self, record) - def _vispy_emit(self, record): - """Log message emitter that wraps directly to the standard method""" - return logging.StreamHandler.emit(self, record) - def _vispy_set_match(self, match): old_match = self._vispy_match self._vispy_match = match @@ -94,7 +90,7 @@ def _vispy_set_match(self, match): if match is not None or self._vispy_emit_record: self.emit = self._vispy_emit_match_andor_record else: - self.emit = self._vispy_emit + self.emit = partial(logging.StreamHandler.emit, self) return old_match def _vispy_set_emit_record(self, record): @@ -104,7 +100,7 @@ def _vispy_set_emit_record(self, record): if match is not None or self._vispy_emit_record: self.emit = self._vispy_emit_match_andor_record else: - self.emit = self._vispy_emit + self.emit = partial(logging.StreamHandler.emit, self) def _vispy_reset_list(self): self._vispy_emit_list = list() diff --git a/vispy/util/event.py b/vispy/util/event.py --- a/vispy/util/event.py +++ b/vispy/util/event.py @@ -18,6 +18,7 @@ import inspect import weakref import traceback +import math from .ordereddict import OrderedDict from ._logging import logger @@ -190,6 +191,7 @@ def __init__(self, source=None, type=None, event_class=Event): self._emitting = False # used to detect emitter loops self.source = source self.default_args = {} + self._err_registry = {} if type is not None: self.default_args['type'] = type @@ -197,7 +199,7 @@ def __init__(self, source=None, type=None, event_class=Event): self.event_class = event_class self._ignore_callback_errors = True - self._print_callback_errors = True + self.print_callback_errors = 'reminders' @property def ignore_callback_errors(self): @@ -216,6 +218,10 @@ def ignore_callback_errors(self, val): def print_callback_errors(self): """Print a message and stack trace if a callback raises an exception + Valid values are "first" (only show first instance), "reminders" (show + complete first instance, then counts), "always" (always show full + traceback), or "never". + This assumes ignore_callback_errors=True. These will be raised as warnings, so ensure that the vispy logging level is set to at least "warning". 
@@ -224,6 +230,9 @@ def print_callback_errors(self): @print_callback_errors.setter def print_callback_errors(self, val): + if val not in ('first', 'reminders', 'always', 'never'): + raise ValueError('print_callback_errors must be "first", ' + '"reminders", "always", or "never"') self._print_callback_errors = val @property @@ -432,10 +441,32 @@ def _invoke_callback(self, cb, event): del tb # Get rid of it in this namespace # Handle if self.ignore_callback_errors: - if self.print_callback_errors: - logger.log_exception() - logger.warning("Error invoking callback %s for " - "event: %s" % (cb, event)) + if self.print_callback_errors != "never": + this_print = 'full' + if self.print_callback_errors in ('first', 'reminders'): + # need to check to see if we've hit this yet + key = repr(cb) + repr(event) + if key in self._err_registry: + self._err_registry[key] += 1 + if self.print_callback_errors == 'first': + this_print = None + else: # reminders + ii = self._err_registry[key] + # Use logarithmic selection + # (1, 2, ..., 10, 20, ..., 100, 200, ...) + if ii % (10 ** int(math.log10(ii))) == 0: + this_print = ii + else: + this_print = None + else: + self._err_registry[key] = 1 + if this_print == 'full': + logger.log_exception() + logger.warning("Error invoking callback %s for " + "event: %s" % (cb, event)) + elif this_print is not None: + logger.warning("Error invoking callback %s repeat %s" + % (cb, this_print)) else: raise
diff --git a/vispy/util/tests/test_event_emitter.py b/vispy/util/tests/test_event_emitter.py --- a/vispy/util/tests/test_event_emitter.py +++ b/vispy/util/tests/test_event_emitter.py @@ -245,7 +245,7 @@ def test_chained_emitters(self): def test_emitter_error_handling(self): """Emitter error handling""" em = EventEmitter(type='test_event') - em.print_callback_errors = False + em.print_callback_errors = 'never' def cb(ev): raise Exception('test')
FIX: Reduce instances of errors in callbacks Maybe we should have a registry like `warnings` does that prevents error messages from being printed again. It makes debugging very difficult to have the same error message printed every `on_draw` call... thoughts? Relevant for @nmz787's comment in #213.
@nmz787, responding to your post in #213 over here since this is the more relevant place to put it. I know the type of behavior you mean, where you get an error in the `on_initialize` or `on_draw`, and subsequent calls to `on_draw` are broken and thus spew a ton of errors. I'm not sure if it's possible to throw an error at the application level and terminate in the case of a failed `on_initialize` or `on_draw` callback, due to how event loops are handled at the application level. @almarklein do you know? It should definitely be possible, however, to have a type of registry system (like the `warnings` package uses) where a given error message is only reported once, even if it doesn't prevent continued execution. I'm also not sure that we'd want to discontinue execution in _all_ cases when a callback fails, so we probably want to implement a registry-like behavior in any case. Maybe a dictionary where the exception string is the key, and the value is a counter. A message only prints if it isn't in the dictionary, otherwise it just increments the value stored. From what I've read it's fastest (computational time) to implement something with a try except block: ``` def printWarning(messageString): try: warnings[messageString]+=1 else: warnings[messageString]=1 print messageString ``` then maybe write those data to a log file on exit? ``` def ExitHandler(): with open('vispy_warning_error.log','a+') as warningFile: for message in warnings: warningFile.write('The following message was emitted %s times.\n' % warnings[message]) warningFile.write(str(message)) warningFile.write('\n----\n') atexit.register(ExitHandler) ``` That seems reasonable. We can probably get away with not logging for a first pass. (We'd want it to be optional, we'd also want to think about logging everything from `vispy.util.logger`, etc. -- it's a bit of a can of worms there.) Is a `try` actually faster than doing `if messageString not in warnings:`??? That would be weird, but good to know. I think something like that would work well. Let's wait to hear from the other devs before implementing anything, though. IIRC there is some control over the callback error messages already, but I don't remember what it is. I can look it up if the other devs don't chime in about it. @lcampagn you've worked with the events system a bit -- any thoughts? `warnings` looks like it could be good, but I've never worked with it before. I guess I was wrong about `try/except` it being 'fastest', I believe I was influenced by this post a few months ago: Python dictionary: add or increment entry http://stackoverflow.com/a/2627871/253127 Just to be clear, I don't actually want to use the `warnings` package, but rather implement something like it does where we use our own callback error registry. We shouldn't shadow the name `warnings` either (since it's a built-in package name), so we should call the registry dict something like `_vispy_callback_errors` or so if we do decide to go this route. > I'm not sure if it's possible to throw an error at the application level and terminate in the case of a failed on_initialize or on_draw callback, You _could_ do sys.exit() but I would not recommend that, since that means we cannot go into postmortem debugging. > Is a try actually faster than doing if messageString not in warnings:??? `try` is faster than `if` when it does not trigger an exception. If it does, it is much slower. Well the exception happens once, the try is successful probably 1000s of times in my last case. 
Not too keen on trying to hide repeated error messages. This can lead to a lot of frustration in debugging if you're not aware this is happening. Adding another message like "Suppressed N repeated error messages" might alleviate this concern.. Another approach is to implement error checking in the individual examples that would disable further calls to the draw event. I'm fine with adding a subsequent message about suppression. Even if that one-line suppression message were printed instead of the full traceback it would be a great improvement. Then I'd only get 60 error lines per second printed instead of about 600 (behavior right now), which makes debugging a huge pain. Another approach: For some events (like paint) we could disconnect the event handler when it raises an error (and notify that we do this, of course) Yes--`EventEmitter` currently has two properties: - `print_callback_errors` causes the traceback to be logged for any callbacks that raise an exception - `ignore_callback_errors` causes the emitter to consume callback exceptions, allowing it to continue processing other callbacks. These might be replaced by a single method like ``` def set_error_behavior(ignore=True, log=True, disconnect=False): ``` Great idea. I don't like our current behavior as default because it makes debugging very difficult. Should we make disconnect True by default? If not, then we should probably still do the registry to tame the output a bit. In fact, we might want to have support for a registry anyway. WDYT? > Should we make disconnect True by default? Probably not.. one of the nice things about event-driven programming is its tolerance for intermittent failures. Making this available as a config / command-line option is a good idea, though. In fact, a global `debug` option that sets multiple appropriate flags might be nice.. > If not, then we should probably still do the registry to tame the output a bit. Yet another option is to allow something like ``` emitter.set_error_handling(log='once') ``` I am ok with adding a registry if other options are not sufficient. If you do implement this: - Definitely need a config option and command line flag for disabling it. - Can we somehow ensure that "suppressed error message" messages are not buried in non-suppressed error messages? - What are the criteria for deciding whether an error message has already been seen? A config / command-line flag should be doable. IIRC we already set the logging level on startup, in which case this would be hopefully a straightforward extension of that. In terms of the short suppression lines getting buried, I am not too worried about this case. If there are really _that_ many error messages in a user's code, then I wouldn't consider having the `"Suppressed repeated message for the Nth time" be uber-visible a high priority. To start, I'd just use strict string equivalence of the error message for the registry. We can add a tolerance / make it better later. > To start, I'd just use strict string equivalence of the error message for the registry. We can add a tolerance / make it better later. Sounds expensive--many of these tracebacks are quite large. There's also the possibility that the registry could consume memory quickly. Fuzzy matching would help avoid this, at even greater expense.. Perhaps we should just try it for now. So long as the default behavior is not affected (especially with respect to performance), I see no problem. Let's see how it works. 
The performance hits should only happen when there are errors, which isn't too bad a place to have a performance hit. The current behavior (printing tons of lines) has actually bogged down my IDE before due to the printing, which is itself a form of performance hit :) I believe the logger module can be handed a file or a stream, which sys.stderr is.
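A standalone sketch of the throttling scheme the patch above settles on: the full traceback is logged for the first failure of a given (callback, event) pair, and afterwards only short reminders are emitted at logarithmically spaced repeat counts.

```python
import math

_err_registry = {}


def error_print_mode(key):
    """Return 'full' on the first failure, 'reminder' at logarithmically
    spaced repeats (2, 3, ..., 10, 20, ..., 100, 200, ...), else None."""
    count = _err_registry.get(key, 0) + 1
    _err_registry[key] = count
    if count == 1:
        return 'full'
    if count % (10 ** int(math.log10(count))) == 0:
        return 'reminder'
    return None


# The key would be something like repr(callback) + repr(event).
printed = [i for i in range(1, 101) if error_print_mode('cb+event')]
assert printed == list(range(1, 11)) + list(range(20, 101, 10))
```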
2014-08-02T20:08:42
vispy/vispy
404
vispy__vispy-404
[ "388" ]
9dc61deb2eb5124f0166b92b0af1ade210b68f87
diff --git a/vispy/color/_color.py b/vispy/color/_color.py --- a/vispy/color/_color.py +++ b/vispy/color/_color.py @@ -303,6 +303,26 @@ def __eq__(self, other): return np.array_equal(self._rgba, other._rgba) ########################################################################### + def __getitem__(self, item): + if isinstance(item, tuple): + raise ValueError('ColorArray indexing is only allowed along ' + 'the first dimension.') + subrgba = self._rgba[item] + if subrgba.ndim == 1: + assert len(subrgba) == 4 + elif subrgba.ndim == 2: + assert subrgba.shape[1] in (3, 4) + return ColorArray(subrgba) + + def __setitem__(self, item, value): + if isinstance(item, tuple): + raise ValueError('ColorArray indexing is only allowed along ' + 'the first dimension.') + # value should be a RGBA array, or a ColorArray instance + if isinstance(value, ColorArray): + value = value.rgba + self._rgba[item] = value + # RGB(A) @property def rgba(self):
diff --git a/vispy/color/tests/test_color.py b/vispy/color/tests/test_color.py --- a/vispy/color/tests/test_color.py +++ b/vispy/color/tests/test_color.py @@ -23,6 +23,28 @@ def test_color(): assert_array_equal(x.hsv, [240, 1, 1]) +def test_color_array(): + """Basic tests for ColorArray class""" + x = ColorArray(['r', 'g', 'b']) + assert_array_equal(x.rgb, np.eye(3)) + # Test ColorArray.__getitem__. + assert isinstance(x[0], ColorArray) + assert isinstance(x[:], ColorArray) + assert_array_equal(x.rgba[:], x[:].rgba) + assert_array_equal(x.rgba[0], x[0].rgba.squeeze()) + assert_array_equal(x.rgba[1:3], x[1:3].rgba) + assert_raises(ValueError, x.__getitem__, (0, 1)) + # Test ColorArray.__setitem__. + x[0] = 0 + assert_array_equal(x.rgba[0, :], np.zeros(4)) + assert_array_equal(x.rgba, x[:].rgba) + x[1] = 1 + assert_array_equal(x[1].rgba, np.ones((1, 4))) + x[:] = .5 + assert_array_equal(x.rgba, .5 * np.ones((3, 4))) + assert_raises(ValueError, x.__setitem__, (0, 1), 0) + + def test_color_interpretation(): """Test basic color interpretation API""" # test useful ways of single color init
In ColorArray, implement __getitem__ and __setitem__ To access a single or a subset of colors.
PR welcome :)
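A short usage sketch of the indexing this adds, mirroring the new tests in the test_patch; it assumes `ColorArray` is importable from `vispy.color`, as those tests do.

```python
from vispy.color import ColorArray

x = ColorArray(['r', 'g', 'b'])
first = x[0]        # a one-colour ColorArray
pair = x[1:3]       # slicing along the first axis also returns a ColorArray
x[0] = 0            # scalars broadcast into the selected RGBA row(s)
x[1:3] = pair       # assigning from another ColorArray uses its rgba values
print(x.rgba)       # -> (3, 4) float array
# Indexing along a second dimension is rejected: x[0, 1] raises ValueError.
```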
2014-08-17T09:56:08