KeyError: 'text'

i am trying to update my chatbot from 1.10 to 2.8.9 and while doing this I am getting this error (KeyError: ‘text’). i have looked in this also http://forum.rasa.com/t/message-object-does-not-recognize-text-attribute-in-custom-component-for-rasa-2-0/36313/11 but it didn’t help.


import re

import os

import joblib

import pickle

import typing

from typing import Any, Optional, Text, Dict, List, Type

from rasa.nlu.components import Component

from rasa.nlu.config import RasaNLUModelConfig

from rasa.shared.nlu.training_data.message import Message

from rasa.shared.nlu.training_data.training_data import TrainingData

from rasa.nlu import utils

import rasa.utils.io as io_utils

if typing.TYPE_CHECKING:

    from rasa.nlu.model import Metadata

cooking_methhods = {

    "roasting": ["roasting", "barbecuing", "grilling", "rotisserie", "searing"],

    "baking": ["baking", "baking blind", "flashbaking"],

    "boiling": ["boiling", "blanching", "braising", "coddling", "double steaming", "infusion", "poaching", "pressure cooking", "simmering", "smothering", "steaming", "steeping", "stewing", "stone boiling", "vacuum flask cooking"],

    "frying": ["fry", "air frying", "deep frying", "gentle frying", "hot salt frying", "hot sand frying", "pan frying", "pressure frying", "sautéing", "shallow frying", "stir frying", "vacuum frying"],

    "smoking": ["smoking"]

}

utensils = ["Chef's knife", "Paring knife", "Knife sharpening/honing rod", "Bread knife", "Chopping/cutting board(s)", "Kitchen shears/scissors", "Vegetable peeler",

            "Garlic press", "Grater", "Kitchen scales", "Measuring jug", "Measuring spoons", "Measuring cups", "Mixing bowl", "Colander/pasta strainer", "Sieve", "Rolling pin", "Can opener", "Blender",

            "Frying pan/skillet", "Saucepans", "Ovenproof dish", "Roasting tin", "Baking sheet", "Stirring spoon", "Slotted spoon", "Spatula", "Tongs", "Masher", "Balloon whisk", "Oven gloves", "Pot holders", "Food/meat thermometer",

            ]

cuisines = ["Ainu","Albanian","Argentine","Andhra","Anglo-Indian","Arab","Armenian","Assyrian","Awadhi","Azerbaijani","Balochi","Belarusian","Bangladeshi","Bengali","Berber","Brazilian","Buddhist","Bulgarian","Cajun","Cantonese","Caribbean","Chechen","Chinese cuisine","Chinese Islamic","Circassian","Crimean Tatar","Cypriot","Danish","English","Estonian","French","Filipino","Georgian","German","Goan","Goan Catholic","Greek","Gujarati","Hyderabad","Indian cuisine","Indian Chinese","Indian Singaporean cuisine","Indonesian","Inuit","Irish","Italian-American","Italian cuisine","Jamaican","Japanese","Jewish","Karnataka","Kazakh","Keralite","Korean","Kurdish","Laotian","Lebanese","Latvian","Lithuanian","Louisiana Creole","Maharashtrian","Mangalorean","Malay","Malaysian Chinese cuisine","Malaysian Indian cuisine","Mediterranean cuisine","Mexican","Mordovian","Mughal","Native American","Nepalese","New Mexican","Odia","Parsi","Pashtun","Polish","Pennsylvania Dutch","Pakistani","Peranakan","Persian","Peruvian","Portuguese","Punjabi","Rajasthani","Romanian","Russian","Sami","Serbian","Sindhi","Slovak","Slovenian","Somali","South Indian","Soviet","Spanish","Sri Lankan","Taiwanese","Tatar","Thai","Turkish","Tamil","Udupi","Ukrainian","Vietnamese","Yamal","Zambian","Zanzibari"]

cuisines_raw = ["African\nNorth\nWest\nlist","North","West","list","Americas\nNorth\nSouth\nlist","North","South","list","Asian\nCentral\nIndian subcontinent\nlist","Central","Indian subcontinent","list","Caribbean","Caucasian","European\nCentral\nEastern\nlist","Central","Eastern","list","Global","Latin American","Mediterranean","Middle Eastern\nLevantine","Levantine","Oceanic","Afghan","Albanian","Algerian","American\nCalifornia\nHawaiian\nNew American\nPuerto Rican\nSoul food","California","Hawaiian","New American","Puerto Rican","Soul food","Angolan","Argentine","Armenian","Australian","Austrian","Azerbaijani","Bahraini","Bangladeshi\nSylheti","Sylheti","Barbadian","Belarusian","Belgian","Belizean","Beninese","Bhutanese","Bolivian","Bosnian-Herzegovinian","Botswana","Brazilian","British\nCornish\nEnglish\nNorthern Irish\nScottish\nWelsh","Cornish","English","Northern Irish","Scottish","Welsh","Bruneian","Bulgarian","Burkinabé","Burmese","Burundian","Cambodian","Cameroonian","Canadian\nQuebecois","Quebecois","Central African Republic","Chadian","Chilean","Chinese\nCantonese\nChinese Islamic\nHong Kong\nMacanese","Cantonese","Chinese Islamic","Hong Kong","Macanese","Colombian","Congolese","Croatian","Cuban","Cypriot","Czech","Danish\nFaroese\nGreenlandic","Faroese","Greenlandic","Djiboutian","Dominican","Dominican Republic","Dutch","East Timorese","Ecuadorian","Egyptian","Emirati","Equatorial Guinean","Eritrean","Estonian","Ethiopian","Fijian","Filipino","Finnish","French\nOccitan","Occitan","Gabonese","Gambian","Georgian","German","Ghanaian","Greek\nGreek Macedonian","Greek Macedonian","Guatemalan","Guianan","Guinea-Bissauan","Guinean","Haitian","Honduran","Hungarian","Icelandic","Indian\nArunachalese\nAssamese\nBihari\nGoan\nGujarati\nHimachal Pradesh\nKashmiri\nJharkhandi\nKarnataka\nKerala\nMaharashtrian\nManipuri\nMeghalayan\nMizo\nNaga\nOdia\nRajasthani\nSikkimese\nTamil\nTelugu","Arunachalese","Assamese","Bihari","Goan","Gujarati","Himachal Pradesh","Kashmiri","Jharkhandi","Karnataka","Kerala","Maharashtrian","Manipuri","Meghalayan","Mizo","Naga","Odia","Rajasthani","Sikkimese","Tamil","Telugu","Indonesian\nAcehnese\nArab\nBalinese\nBanjarese\nBatak\nBetawi\nChinese\nIndian\nIndo\nJavanese\nMadurese\nMakassarese\nManado\nMinangkabau\nPalembangese\nSundanese","Acehnese","Arab","Balinese","Banjarese","Batak","Betawi","Chinese","Indian","Indo","Javanese","Madurese","Makassarese","Manado","Minangkabau","Palembangese","Sundanese","Iranian","Iraqi","Irish","Israeli","Italian\nAbruzzese\nLombard\nNeapolitan\nRoman\nSardinian\nSicilian\nVenetian","Abruzzese","Lombard","Neapolitan","Roman","Sardinian","Sicilian","Venetian","Ivorian","Jamaican","Japanese","Jordanian","Kazakh","Kenyan","Korean\nNorth Korean","North Korean","Kosovan","Kuwaiti","Kyrgyz","Lao","Latvian","Lebanese","Lesotho","Liberian","Libyan","Liechtensteiner","Lithuanian","Luxembourg","Macedonian","Malagasy","Malawian","Malaysian\nChinese\nEurasian\nIndian\nSabahan\nSarawakian","Chinese","Eurasian","Indian","Sabahan","Sarawakian","Maldivian","Malian","Maltese","Mauritanian","Mauritian","Mexican","Moldovan","Monégasque","Mongolian","Montenegrin","Moroccan","Mozambican","Namibian","Nauruan","Nepalese","New Zealand","Niger","Nigerian","Niuean","Norwegian","Omani","Pakistani\nPunjabi\nSindhi","Punjabi","Sindhi","Palestinian","Panamanian","Peruvian\nChinese","Chinese","Polish","Portuguese","Qatari","Romanian","Russian","Rwandan","Saint Helena","Saint Lucian","Salvadoran","Sammarinese","São Tomé and Príncipe","Saudi Arabian","Senegalese","Serbian","Seychellois","Sierra Leonean","Singaporean","Slovak","Slovenian","Somali","South African","Spanish\nAndalusian\nAsturian\nBalearic\nBasque\nCanarian\nCantabrian\nCatalan\nExtremaduran\nGalician\nManchegan\nValencian","Andalusian","Asturian","Balearic","Basque","Canarian","Cantabrian","Catalan","Extremaduran","Galician","Manchegan","Valencian","Sri Lankan","Sudanese","Swazi","Swedish","Swiss","Syrian","Taiwanese","Tajik","Tanzanian\nZanzibari","Zanzibari","Thai","Togolese","Tunisian","Turkish","Trinidadian and Tobagonian","Ugandan","Ukrainian","Uruguayan","Uzbek","Venezuelan","Vietnamese","Western Saharan","Yemeni","Zambian","Zimbabwean","Ainu","American Chinese","Anglo-Indian","Arab","Assyrian","Balochi","Bengali","Berber","Buddhist","Cajun","Chechen","Christian","Circassian","Crimean Tatar","Goan Catholic","Greek American","Hazaragi","Hindu","Inuit","Islamic","Italian American","Jain","Jewish\nAmerican\nAshkenazi\nBukharan\nEthiopian\nMizrahi\nSephardic\nSyrian","American","Ashkenazi","Bukharan","Ethiopian","Mizrahi","Sephardic","Syrian","Komi","Kurdish","Livonian","Louisiana Creole","Malay","Mordovian","Indigenous American","Okinawan","Ossetian","Parsi","Pashtun","Pennsylvania Dutch","Peranakan","Sami","Sikh","Tatar","Tibetan","Udmurt","Yamal","Yup'ik","Ancient Egyptian","Ancient Greek","Ancient Israelite","Ancient Roman","Aztec","Byzantine","Early modern European","Historical Chinese","Historical Indian subcontinent","History of seafood","History of vegetarianism","Inca","Mayan","Medieval","Ottoman","Peasant","Soviet","Thirteen Colonies","Classique","Fast food","Fusion","Haute","Molecular gastronomy","Note by Note","Nouvelle","List of cuisines","Lists of prepared foods","Ancient Egyptian cuisine","Ancient Greek cuisine","Ancient Israelite cuisine","Ancient Roman cuisine","Cuisine of the Thirteen Colonies","Aztec cuisine","Maya cuisine","Muisca cuisine","Inca cuisine","Byzantine cuisine","Medieval cuisine","Ottoman cuisine","Early modern European cuisine","History of Argentine cuisine","History of Chinese cuisine","History of English cuisine","History of French cuisine","History of Hawaiian cuisine","History of Indian cuisine","History of Italian cuisine","History of Japanese cuisine","History of Jewish cuisine","Persian cuisine","History of Polish cuisine","History of Scottish cuisine","Breakfast","Second breakfast","Elevenses","Brunch","Lunch","Merienda","Iftar","Tea","Coffee break","Dinner","Supper","Apéritif and digestif","Hors d'oeuvre","Amuse-bouche","Entrée","Roast","Main course","Side dish","Entremet","Dessert","Savoury","Meal replacement","Snack","Buffet","Conveyor belt sushi","Dish","Finger food","Full course dinner/Multicourse meal","Platter","Service à la française","Service à la russe","Silver service","Small plates","Dining room","Eating utensils","Food presentation","Garnish","Nyotaimori","Pièce montée","Serving size","Tablecloth","Table setting","Tableware","Table manners / Eating utensil etiquette","Toast","Anju","Antipasto","Cicchetti","Banchan / Korean table d'hôte","Bandeja paisa","Dastarkhān","Dim sum / Yum cha","Fika","Izakaya / Sakana","Kaiseki","Meat and three","Meze","Plate lunch","Pu pu platter","Rijsttafel","Sadhya","Smörgåsbord","Tapas","Tiffin","Thali","Zakuski","Airline meal","Bento","Convenience food","Dosirak","Instant breakfast","Packed lunch","Take-out","TV dinner","À la carte","Table d'hôte","Blue-plate special","Combination meal","Free lunch","Free refill","Happy hour","Kids' meal","School meal","Suspended meal","Tasting menu / Degustation","Value meal","Value menu","Banquet","Barbecue","Commercium","Communal dining","Dining in","Mangal","Picnic","Potluck","Sittning","State banquet","Supra","Tableround","Tea party","Catering","Cook","Cookbook","Cooking","Culinary arts","Cuisine\noutline","outline","Drink","Eating","Food","Meal preparation","Meal delivery service","Restaurant","Waiting staff","Coffee culture","Tea culture"]

appliances = ["Air fryer","Bachelor griller","Barbecue grill","Beehive oven","Brasero (heater)","Brazier","Bread machine","Burjiko","Butane torch","Chapati maker","Cheesemelter","Chocolatera","Chorkor oven","Clome oven","Comal (cookware)","Combi steamer","Communal oven","Convection microwave","Convection oven","Corn roaster","Crepe maker","Deep fryer","Earth oven","Electric cooker","Energy regulator","Espresso machine","Field kitchen","Fire pot","Flattop grill","Food steamer","Fufu Machine","Griddle","Halogen oven","Haybox","Horno","Hot box (appliance)","Hot plate","Instant Pot","Kamado","Kitchener range","Kujiejun","Kyoto box","Makiyakinabe","Masonry oven","Mess kit","Microwave oven","Multicooker","Oven","Pancake machine","Panini sandwich grill","Popcorn maker","Pressure cooker","Pressure fryer","Reflector oven","Remoska","Rice cooker","Rice polisher","Roasting jack","Rocket mass heater","Rotimatic","Rotisserie","Russian oven","Sabbath mode","Salamander broiler","Samovar","Sandwich toaster","Self-cleaning oven","Shichirin","Slow cooker","Solar cooker","Sous-vide cooker","Soy milk maker","Stove","Susceptor","Tabun oven","Tandoor","Tangia","Thermal immersion circulator","Toaster and toaster ovens","Turkey fryer","Vacuum fryer [1]","Waffle iron","Wet grinder","Wood-fired oven","Coffee percolator","Coffeemaker","Electric water boiler","Instant hot water dispenser","Kettle"]

NER_REGEX_FILE_NAME = "ner_regex_patterns.pkl"

class ExtractFoodEnitites(Component):

    @classmethod

    def required_components(cls) -> List[Type[Component]]:

        """Specify which compenent need to be present in the pipeline"""

        return []

    # default config

    defaults = {}

    # list of supported languages

    supported_language_list = None

    # list of langauges not supported

    not_supported_language_list = None

    def __init__(self, component_config: Optional[Dict[Text, Any]] = None) -> None:

        super(ExtractFoodEnitites, self).__init__(component_config)

    def convert_to_rasa(self, value, confidence, entity):

        """Convert model output to rasa compatible output format"""

        entity = {

            "value": value,

            "confidence": confidence,

            "entity": entity,

            "extractor": "ner_regex"

        }

        return entity

    @staticmethod

    def create_macro_re(tokens, flags=0):

        """

        Given a dict in which keys are token names and values are lists

        of strings that signify the token, return a macro re that encodes

        the entire set of tokens.

        """

        d = {}

        for token, vals in tokens.items():

            d[token] = '(?P<{}>{})'.format(token, '|'.join(vals))

        combined = '|'.join(d.values())

        return re.compile(combined, flags)

    @staticmethod

    def find_tokens(macro_re, s):

        """

        Given a macro re constructed by `create_macro_re()` and a string,

        return a list of tuples giving the token name and actual string matched

        against the token.

        """

        found = []

        for match in re.finditer(macro_re, s):

            found.append([(t, v) for t, v in match.groupdict().items() if v is not None][0])

        return found

    def train(

        self,

        training_data: TrainingData,

        config: Optional[RasaNLUModelConfig] = None,

        **kwargs: Any,

        ) -> None:

        """Train this component"""

        with open("data/lookups/recipe_names.txt") as rn_file:

            recipename = rn_file.read().splitlines()

        with open("data/lookups/ingredients.txt") as i_file:

            ingredients = i_file.read().splitlines()

        with open("data/lookups/tags.txt") as i_file:

            tags = i_file.read().splitlines()

        train_regex = cooking_methhods

        train_regex.update({"recipename": recipename, "ingredient": ingredients, "tag": tags})

        self.train_regex = train_regex

        # ner_regex_file = os.path.join(model_dir, NER_REGEX_FILE_NAME)

        # utils.json_pickle(ner_regex_file, self)

        # joblib.dump(self, ner_regex_file)

    def process(self, message:Message, **kwargs: Any) -> None:

        """ Process an incoming message"""

        if not self.train_regex:

            entity = None

        else:

            pass

            # print(message)

            # tokens = [t.text for t in message.get("tokens")]

            self.macro_pat = self.create_macro_re(self.train_regex, re.I)

            found_entites = self.find_tokens(self.macro_pat, message.data['text'])

            for each_found_entity in found_entites:

                # print(each_found_entity)

                entity = self.convert_to_rasa(each_found_entity[1], each_found_entity[0], 0.95)

                message.set("entities", [entity], add_to_output=True)

    def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]:

        """Persist this component to dist for future loading"""

        ner_regex_file = os.path.join(model_dir, NER_REGEX_FILE_NAME)

        io_utils.json_pickle(ner_regex_file, self)

        # joblib.dump(self, ner_regex_file)

        # pickle.dump(self, open(ner_regex_file, "wb"))

        return {"ner_regex_file": ner_regex_file}

    @classmethod

    def load(

        cls,

        meta: Dict[Text, Any],

        model_dir: Optional[Text] = None,

        model_metadata: Optional["Metadata"] = None,

        cached_component: Optional["Component"] = None,

        **kwargs: Any,

        ) -> "Component":

        """Load this component from file."""

        print(model_dir)

        print(meta)

        file_name = meta.get("ner_regex_file")

        ner_regex_file = os.path.join(model_dir, NER_REGEX_FILE_NAME)

        return io_utils.json_unpickle(ner_regex_file)

@ChrisRahme can you please look into it?

Please properly format the code by using backticks ``` (Under the Esc key), not quotes ‘’’

i have updated it @ChrisRahme

1 Like

Does this work?

    def process(self, message:Message, **kwargs: Any) -> None:
        """ Process an incoming message"""
        if not self.train_regex:
            entity = None
        else:
            self.macro_pat = self.create_macro_re(self.train_regex, re.I)

            try:
                found_entites = self.find_tokens(self.macro_pat, message.data['text'])
                for each_found_entity in found_entites:
                    # print(each_found_entity)
                    entity = self.convert_to_rasa(each_found_entity[1], each_found_entity[0], 0.95)
                    message.set("entities", [entity], add_to_output=True)
            except:
                pass
1 Like

hey @ChrisRahme i am really sorry for late reply and it worked, now i am getting same error (KeyError: ‘text’) from diiferent custom component… can you help with this also in process function.

here is code

import re
import os

import joblib

import typing
from typing import Any, Optional, Text, Dict, List, Type

from rasa.nlu.components import Component
from rasa.nlu.config import RasaNLUModelConfig
from rasa.shared.nlu.training_data.message import Message
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.nlu import utils
import rasa.utils.io as io_utils

if typing.TYPE_CHECKING:
	from rasa.nlu.model import Metadata

from rsearch.tfidf_qa_se import TFIDFQA

KB_FILE_NAME = "tfidf_qa_se.pkl"

class SEQuestionAnswer(Component):
	
	@classmethod
	def required_components(cls) -> List[Type[Component]]:
		"""Specify which compenent need to be present in the pipeline"""

		return []

	# default config
	defaults = {}

	# list of supported languages
	supported_language_list = None

	# list of langauges not supported
	not_supported_language_list = None

	def __init__(self, component_config: Optional[Dict[Text, Any]] = None) -> None:
		super(SEQuestionAnswer, self).__init__(component_config)

	def convert_to_rasa(self, value, confidence, entity):
		"""Convert model output to rasa compatible output format"""

		entity = {
			"value": value,
			"confidence": confidence,
			"entity": entity,
			"extractor": "ner_regex"
		}
		return entity

	def train(
		self,
		training_data: TrainingData,
		config: Optional[RasaNLUModelConfig] = None,
		**kwargs: Any,
		) -> None:
		"""Train this component"""

		self.es_tfidf = TFIDFQA()


	def process(self, message:Message, **kwargs: Any) -> None:
		""" Process an incoming message"""
		body = {"query": {"match": {"Title": message.data['text']}}}
		es_response = self.es_tfidf.search(body=body)
		for each_qa in es_response["hits"]["hits"][:20]:
			
			if each_qa["_score"] > 4:
				qa_found = each_qa["_source"]["Title"]

				entity = self.convert_to_rasa(each_qa["_source"], "question", 0.95)
				message.set("entities", [entity], add_to_output=True)

	def persist(self, file_name: Text, model_dir: Text) -> Optional[Dict[Text, Any]]:
		"""Persist this component to dist for future loading"""
		ner_regex_file = os.path.join(model_dir, KB_FILE_NAME)
		# utils.json_pickle(ner_regex_file, self)
		# joblib.dump(self, ner_regex_file)
		io_utils.json_pickle(ner_regex_file, self)
		return {"knowledgebase_file": ner_regex_file}
	
	@classmethod
	def load(
		cls,
		meta: Dict[Text, Any],
		model_dir: Optional[Text] = None,
		model_metadata: Optional["Metadata"] = None,
		cached_component: Optional["Component"] = None,
		**kwargs: Any,
		) -> "Component":
		"""Load this component from file."""

		file_name = meta.get("knowledgebase_file")
		ner_regex_file = os.path.join(model_dir, KB_FILE_NAME)
		return io_utils.json_unpickle(ner_regex_file)

Just do the same thing

	def process(self, message:Message, **kwargs: Any) -> None:
		""" Process an incoming message"""
        try:
            body = {"query": {"match": {"Title": message.data['text']}}}
            es_response = self.es_tfidf.search(body=body)

            for each_qa in es_response["hits"]["hits"][:20]:	
                if each_qa["_score"] > 4:
                    qa_found = each_qa["_source"]["Title"]
                    entity = self.convert_to_rasa(each_qa["_source"], "question", 0.95)
                    message.set("entities", [entity], add_to_output=True)

        except:
            pass
1 Like

Thank you for marking my 100th solution, @sdhaker2 :partying_face:

Always glad to be helping the community :slight_smile:

1 Like

Thank you @ChrisRahme for helping me. And congratulation on the 100th solution.

1 Like