My code looks like the following:
async def parse(text: Text, agent: Agent):
    response = await agent.handle_text(text)
    return response
I initialize multiple agents like this:
# create skill agents based on NLU and dialog models
def create_agents(skills, endpoints):
    agents = {}
    for skill in skills:
        nlu_model_path = "./models/nlu/" + skill + "/nlu"
        interpreter = RasaNLUInterpreter(nlu_model_path)
        core_model_path = "models/dialogue/" + skill
        agent = Agent.load(core_model_path,
                           interpreter=interpreter,
                           action_endpoint=endpoints.action)
        agents[skill] = agent
    return agents
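For completeness, the agents dict is built once at startup, roughly like this (the skill names and the endpoints file are placeholders, and I am assuming the older rasa_core package here, since the snippets use separate NLU and dialogue model paths):

# assuming the old rasa_core package; adjust the import to your Rasa version
from rasa_core.utils import AvailableEndpoints

# placeholders: the real skill names and endpoints file differ
endpoints = AvailableEndpoints.read_endpoints("endpoints.yml")
all_agents = create_agents(["weather", "restaurant"], endpoints)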
def generate_dialog(message):
    current_skill, current_agent = get_agent(message, all_agents, classification_interpreter)
    if current_agent:
        json_result = asyncio.run(parse(message, current_agent))
        ...
        return json_result
The problem is that when I call generate_dialog repeatedly to process text from a file, the bot slows down sharply after processing roughly 200 sentences.
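The driver loop is essentially this (simplified; the file name and the per-sentence handling are placeholders):

# simplified driver loop; file name and result handling are placeholders
with open("sentences.txt") as f:
    for line in f:
        sentence = line.strip()
        if sentence:
            result = generate_dialog(sentence)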
Memory usage does not increase, but the CPU runs at about 100% while this happens. What might cause this? Could it be the repeated calls to asyncio.run, or the similar run_until_complete?
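To be concrete, these are the two variants I mean (a minimal sketch, not my actual code):

import asyncio

# Variant 1: asyncio.run creates a fresh event loop for every message
# and closes it afterwards
def generate_dialog_v1(message, agent):
    return asyncio.run(parse(message, agent))

# Variant 2: one long-lived event loop, created once and reused for every message
loop = asyncio.new_event_loop()

def generate_dialog_v2(message, agent):
    return loop.run_until_complete(parse(message, agent))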
To add a bit more information: if I comment out the call "await self._predict_and_execute_next_action(message, tracker)" in the method below, the speed becomes normal and stable:
async def handle_message(self, message: UserMessage, current_skill) -> Optional[List[Text]]:
    """Handle a single message with this processor."""
    # preprocess message if necessary
    tracker = await self.log_message(message, skill_list, current_skill, turn)
    if not tracker:
        return None

    await self._predict_and_execute_next_action(message, tracker)
    self._save_tracker(tracker)

    if isinstance(message.output_channel, CollectingOutputChannel):
        nlu_result = (tracker.latest_message_copy.intent,
                      copy.deepcopy(tracker.slots).items(),
                      tracker.latest_message_copy.entities)
        return [message.output_channel.messages, nlu_result]
    else:
        return None
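A minimal way to see where the time goes inside handle_message (illustrative only, this is not in my real code) would be to time the two awaits separately:

# drop-in timing for the two awaits in handle_message above
import time

t0 = time.perf_counter()
tracker = await self.log_message(message, skill_list, current_skill, turn)
t1 = time.perf_counter()
await self._predict_and_execute_next_action(message, tracker)
t2 = time.perf_counter()
print(f"log_message: {t1 - t0:.3f}s, predict_and_execute: {t2 - t1:.3f}s")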