The problem is that the following error occurred at around 40% of the training epochs:
File "/opt/venv/bin/rasa", line 8, in <module>
sys.exit(main())
File "/opt/venv/lib/python3.8/site-packages/rasa/__main__.py", line 118, in main
cmdline_arguments.func(cmdline_arguments)
File "/opt/venv/lib/python3.8/site-packages/rasa/cli/train.py", line 59, in <lambda>
train_parser.set_defaults(func=lambda args: run_training(args, can_exit=True))
File "/opt/venv/lib/python3.8/site-packages/rasa/cli/train.py", line 91, in run_training
training_result = train_all(
File "/opt/venv/lib/python3.8/site-packages/rasa/api.py", line 109, in train
return rasa.utils.common.run_in_loop(
File "/opt/venv/lib/python3.8/site-packages/rasa/utils/common.py", line 296, in run_in_loop
result = loop.run_until_complete(f)
File "uvloop/loop.pyx", line 1456, in uvloop.loop.Loop.run_until_complete
File "/opt/venv/lib/python3.8/site-packages/rasa/model_training.py", line 108, in train_async
return await _train_async_internal(
File "/opt/venv/lib/python3.8/site-packages/rasa/model_training.py", line 288, in _train_async_internal
await _do_training(
File "/opt/venv/lib/python3.8/site-packages/rasa/model_training.py", line 334, in _do_training
model_path = await _train_nlu_with_validated_data(
File "/opt/venv/lib/python3.8/site-packages/rasa/model_training.py", line 758, in _train_nlu_with_validated_data
await rasa.nlu.train.train(
File "/opt/venv/lib/python3.8/site-packages/rasa/nlu/train.py", line 111, in train
interpreter = trainer.train(training_data, **kwargs)
File "/opt/venv/lib/python3.8/site-packages/rasa/nlu/model.py", line 221, in train
component.train(working_data, self.config, **context)
File "/opt/venv/lib/python3.8/site-packages/rasa/nlu/classifiers/diet_classifier.py", line 880, in train
self.model.fit(
File "/opt/venv/lib/python3.8/site-packages/rasa/utils/tensorflow/temp_keras_modules.py", line 190, in fit
tmp_logs = train_function(iterator)
File "/opt/venv/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py", line 885, in __call__
result = self._call(*args, **kwds)
File "/opt/venv/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py", line 917, in _call
return self._stateless_fn(*args, **kwds) # pylint: disable=not-callable
File "/opt/venv/lib/python3.8/site-packages/tensorflow/python/eager/function.py", line 3039, in __call__
return graph_function._call_flat(
File "/opt/venv/lib/python3.8/site-packages/tensorflow/python/eager/function.py", line 1963, in _call_flat
return self._build_call_outputs(self._inference_function.call(
File "/opt/venv/lib/python3.8/site-packages/tensorflow/python/eager/function.py", line 591, in call
outputs = execute.execute(
File "/opt/venv/lib/python3.8/site-packages/tensorflow/python/eager/execute.py", line 59, in quick_execute
tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
tensorflow.python.framework.errors_impl.ResourceExhaustedError: OOM when allocating tensor with shape[160,765,765] and type float on /job:localhost/replica:0/task:0/device:CPU:0 by allocator cpu
[[node zeros_like_40 (defined at /lib/python3.8/site-packages/rasa/utils/tensorflow/models.py:158) ]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info. This isn't available when running in Eager mode.
[Op:__inference_train_function_87688]
Errors may have originated from an input operation.
Input Source operations connected to node zeros_like_40:
cond_1/PartitionedCall (defined at /lib/python3.8/site-packages/tensorflow_addons/text/crf.py:202)
Function call stack:
train_function
And then the training just stopped.
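
For context, here is a rough back-of-the-envelope check of the tensor the error mentions. This is a sketch only: reading 160 as the effective batch size and 765 as the padded sequence length is an assumption based on the reported shape, not something taken from my config.

```python
# Size of one tensor with the shape reported in the OOM message:
# [160, 765, 765], dtype float32 (4 bytes per element).
batch, seq_len = 160, 765   # assumption: batch size and padded sequence length
bytes_per_element = 4

tensor_bytes = batch * seq_len * seq_len * bytes_per_element
print(f"{tensor_bytes / 1024 ** 2:.0f} MiB per tensor")  # ~357 MiB

# A training step keeps several intermediates of this size alive at once,
# so on a CPU-only machine the available RAM can run out mid-training.
```

If that reading is right, the usual mitigations would be a smaller batch_size for DIETClassifier in config.yml, or shortening/splitting unusually long training examples so the padded sequence length shrinks. That is a guess at the cause, though, not something the traceback alone proves.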
