@akelad Thanks for the response! The above-mentioned problem was due to a missing dependency, prompt-toolkit, which I have since installed. But now I get a new error:
Traceback (most recent call last):
  File "c:\ica_trial\rasa\rasa\core\registry.py", line 20, in policy_from_module_path
    return class_from_module_path(module_path, lookup_path="rasa.core.policies")
  File "c:\ica_trial\rasa\rasa\utils\common.py", line 174, in class_from_module_path
    m = importlib.import_module(module_name)
  File "c:\apps\sa2446\lib\importlib\__init__.py", line 127, in import_module
    return _bootstrap._gcd_import(name[level:], package, level)
  File "<frozen importlib._bootstrap>", line 1006, in _gcd_import
  File "<frozen importlib._bootstrap>", line 983, in _find_and_load
  File "<frozen importlib._bootstrap>", line 967, in _find_and_load_unlocked
  File "<frozen importlib._bootstrap>", line 677, in _load_unlocked
  File "<frozen importlib._bootstrap_external>", line 728, in exec_module
  File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
  File "C:\ICA\restaurantbot\custom_keras_policy.py", line 47, in <module>
    from Lstm import CUSTOM_LSTM
  File "C:\ICA\restaurantbot\ProgLstm.py", line 16, in <module>
    from keras.engine.base_layer import Layer, disable_tracking, InputSpec
ImportError: cannot import name 'disable_tracking' from 'keras.engine.base_layer' (c:\apps\sa2446\lib\site-packages\keras\engine\base_layer.py)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "c:\ica_trial\rasa\rasa\core\policies\ensemble.py", line 316, in from_dict
    constr_func = registry.policy_from_module_path(policy_name)
  File "c:\ica_trial\rasa\rasa\core\registry.py", line 22, in policy_from_module_path
    raise ImportError("Cannot retrieve policy from path '{}'".format(module_path))
ImportError: Cannot retrieve policy from path 'custom_keras_policy.Custom_KerasPolicy'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "C:\Apps\sa2446\Scripts\rasa-script.py", line 11, in <module>
    load_entry_point('rasa', 'console_scripts', 'rasa')()
  File "c:\ica_trial\rasa\rasa\__main__.py", line 76, in main
    cmdline_arguments.func(cmdline_arguments)
  File "c:\ica_trial\rasa\rasa\cli\train.py", line 111, in train_core
    kwargs=kwargs,
  File "c:\ica_trial\rasa\rasa\train.py", line 252, in train_core
    kwargs=kwargs,
  File "c:\apps\sa2446\lib\asyncio\base_events.py", line 579, in run_until_complete
    return future.result()
  File "c:\ica_trial\rasa\rasa\train.py", line 308, in train_core_async
    kwargs=kwargs,
  File "c:\ica_trial\rasa\rasa\train.py", line 340, in _train_core_with_validated_data
    kwargs=kwargs,
  File "c:\ica_trial\rasa\rasa\core\train.py", line 42, in train
    policies = config.load(policy_config)
  File "c:\ica_trial\rasa\rasa\core\config.py", line 28, in load
    return PolicyEnsemble.from_dict(config_data)
  File "c:\ica_trial\rasa\rasa\core\policies\ensemble.py", line 324, in from_dict
    "".format(policy_name)
rasa.core.policies.ensemble.InvalidPolicyConfig: Module for policy 'custom_keras_policy.Custom_KerasPolicy' could not be loaded. Please make sure the name is a valid policy.
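From the first ImportError, it looks like ProgLstm.py expects a disable_tracking name that my installed keras.engine.base_layer does not provide, and that failed import is what makes the custom policy unloadable. As a quick check (just a sketch against my local install), I can list which of the three imported names actually exist:

import keras.engine.base_layer as base_layer

# ProgLstm.py line 16 imports Layer, disable_tracking and InputSpec from this module;
# print which of them my installed Keras actually defines.
for name in ("Layer", "disable_tracking", "InputSpec"):
    print(name, hasattr(base_layer, name))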
This is my config file:
language: "en"

pipeline:
  - name: "WhitespaceTokenizer"
  - name: "RegexFeaturizer"
  - name: "CountVectorsFeaturizer"
  - name: "EmbeddingIntentClassifier"
    hidden_layers_sizes_a: [1024, 512]
    hidden_layers_sizes_b:
    batch_size: [256, 1054]
    epochs: 100
    embed_dim: 20
    mu_pos: 0.8    # should be 0.0 < ... < 1.0 for 'cosine'
    mu_neg: -0.4   # should be -1.0 < ... < 1.0 for 'cosine'
    similarity_type: "cosine"    # string 'cosine' or 'inner'
    num_neg: 20
    use_max_sim_neg: true
    C2: 0.0015
    C_emb: 0.6
    droprate: 0.25
    intent_tokenization_flag: true
    intent_split_symbol: "+"
    evaluate_every_num_epochs: 10    # small values may hurt performance
    evaluate_on_num_examples: 1000   # large values may hurt performance
  - name: "CRFEntityExtractor"
    BILOU_flag: true
    max_iterations: 50
    L1_c: 0.1
    L2_c: 0.1
  - name: "EntitySynonymMapper"

policies:
  - name: "custom_keras_policy.Custom_KerasPolicy"
    batch_size: 100
    epochs: 100
    validation_split: 0.2
  - name: MemoizationPolicy
  - name: MappingPolicy
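The policy-loading step from the traceback can be reproduced in isolation, without a full training run (a minimal sketch; the filename config.yml is an assumption for wherever this config is saved):

# Load only the policy section the same way rasa.core.train does in the traceback:
# config.load() calls PolicyEnsemble.from_dict(), which is where the registry lookup fails.
from rasa.core import config

policies = config.load("config.yml")
print([type(p).__name__ for p in policies])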
My custom model_architecture is shown below:
def model_architecture(self, data_path) -> keras.models.Model:

    def train(self, data_path):
        # Get encoder inputs, decoder inputs and decoder targets from each split of
        # the data to train in the progressive encoder
        en_in_data1, en_ti = EncoderData.encoder_data(self, data_path, column)
        de_in_data1, de_out_data1, de_ti = DecoderData.decoder_data(self, data_path, column)
        _, _, num_encoder_tokens, _ = EncoderData.data_prep(self, data_path, column)
        _, _, num_decoder_tokens, _, _ = DecoderData.data_prep(self, data_path, column)

        # Two stacked encoder LSTMs; the second is initialised with the first one's states
        encoder_inputs = Input(shape=(None, num_encoder_tokens))
        encoder = LSTM(latent_dim, return_sequences=True, return_state=True)
        encoder1 = LSTM(latent_dim, return_state=True)
        en_out, e_h, e_c = encoder(encoder_inputs)
        e_s = [e_h, e_c]
        encoder_outputs, state_h, state_c = encoder1(en_out, initial_state=e_s)
        encoder_states = [state_h, state_c]
        logger.info(f"encoder input shape: {encoder_inputs}")
        logger.info(f"encoder output shape: {encoder_outputs}")
        logger.info("-----------------------------------------------------------------")

        # Two stacked decoder LSTMs, initialised from the encoder states
        decoder_inputs = Input(shape=(None, num_decoder_tokens))
        decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
        decoder_lstm1 = LSTM(latent_dim, return_sequences=True, return_state=True)
        decoder_outputs1, d_h, d_c = decoder_lstm(decoder_inputs,
                                                  initial_state=encoder_states)
        d_s = [d_h, d_c]
        decoder_outputs, de_h, de_c = decoder_lstm1(decoder_outputs1, initial_state=d_s)
        decoder_dense = Dense(num_decoder_tokens, activation='softmax')
        decoder_outputs = decoder_dense(decoder_outputs)

        # Define model input and output shapes for the initial column
        model1 = Model([encoder_inputs, decoder_inputs], decoder_outputs)
        model1.compile(optimizer='rmsprop', loss='categorical_crossentropy',
                       metrics=['accuracy'])
        print(model1.summary())
        return model1

    model1 = train(self, data_path)
    return model1
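Independently of the Rasa import error, the seq2seq wiring itself can be smoke-tested with a self-contained version of the same layer layout (dummy token counts and latent dimension; hypothetical values, no data loaders):

from keras.layers import Input, LSTM, Dense
from keras.models import Model

# Hypothetical sizes, only for checking that the graph builds
num_encoder_tokens, num_decoder_tokens, latent_dim = 71, 93, 256

# Two stacked encoder LSTMs; the second is initialised with the first one's states
encoder_inputs = Input(shape=(None, num_encoder_tokens))
en_out, e_h, e_c = LSTM(latent_dim, return_sequences=True, return_state=True)(encoder_inputs)
encoder_outputs, state_h, state_c = LSTM(latent_dim, return_state=True)(
    en_out, initial_state=[e_h, e_c])

# Two stacked decoder LSTMs, initialised from the encoder states
decoder_inputs = Input(shape=(None, num_decoder_tokens))
d_out1, d_h, d_c = LSTM(latent_dim, return_sequences=True, return_state=True)(
    decoder_inputs, initial_state=[state_h, state_c])
decoder_outputs, _, _ = LSTM(latent_dim, return_sequences=True, return_state=True)(
    d_out1, initial_state=[d_h, d_c])
decoder_outputs = Dense(num_decoder_tokens, activation="softmax")(decoder_outputs)

model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"])
model.summary()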