diff --git a/learning/python/model_perfspec.py b/learning/python/model_perfspec.py
new file mode 100644
index 0000000..e4b6cd7
--- /dev/null
+++ b/learning/python/model_perfspec.py
@@ -0,0 +1,493 @@
+import marimo
+
+__generated_with = "0.10.16"
+app = marimo.App(width="medium")
+
+
+@app.cell(hide_code=True)
+def title():
+ import marimo as mo
+ notebook_name = 'model_perfspec.py'
+
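+    # lib_perfspec exposes reusable marimo cells: calling .run() executes a
+    # cell and returns an (output, defs) tuple, where defs maps the names the
+    # cell defines to their values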
+ from lib_perfspec import perfspec_vars
+ (_,_defs) = perfspec_vars.run()
+ perfspec = _defs['perfspec']
+
+ from lib_perfspec import perfspec_header
+ (_,_defs) = perfspec_header.run()
+ lib_header = _defs['header']
+ lib_intro = _defs['intro']
+
+ mo.md(
+ f"""
+ {lib_header(notebook_name)}
+
+        ## Info about the **{perfspec['app']['train_mode']}**-trained model
+ """
+ )
+ return (
+ lib_header,
+ lib_intro,
+ mo,
+ notebook_name,
+ perfspec,
+ perfspec_header,
+ perfspec_vars,
+ )
+
+
+@app.cell(hide_code=True)
+def imports():
+ from pathlib import Path
+ import numpy as np
+ return Path, np
+
+
+@app.cell(hide_code=True)
+def intro_load(Path, lib_intro, mo, notebook_name, perfspec):
+    perfspec['vars'] = {}
+
+ from lib_perfspec import perfspec_args
+ (_,_defs) = perfspec_args.run()
+
+    if not Path(perfspec['defaults']['models_dirpath']).exists():
+        raise SystemExit(f"Trained models dir path not found: {perfspec['defaults']['models_dirpath']}")
+
+    if not Path(perfspec['defaults']['checkpoints_dirpath']).exists():
+        raise SystemExit(f"Checkpoints dir path not found: {perfspec['defaults']['checkpoints_dirpath']}")
+
+    if not Path(perfspec['defaults']['data_dirpath']).exists():
+        raise SystemExit(f"Data dir path not found: {perfspec['defaults']['data_dirpath']}")
+
+    # read verbose only after perfspec_args has run, since it may update the settings
+    verbose = perfspec['settings'].get('verbose')
+
+ from lib_perfspec import perfspec_load_actions
+ (_,_defs) = perfspec_load_actions.run()
+ lib_load_actions = _defs['load_actions']
+
+ from lib_perfspec import perfspec_input_sequence
+ (_,_defs) = perfspec_input_sequence.run()
+ lib_get_input_sequence = _defs['get_input_sequence']
+
+ from lib_perfspec import perfspec_predict
+ _, _defs = perfspec_predict.run()
+ lib_predict_action = _defs['predict_action']
+
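+    # placeholders; the cells below populate the model and training history on demand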
+ perfspec['vars']['model'] = None
+ perfspec['vars']['history'] = None
+
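+    # load the recorded actions together with the derived vocabulary, the
+    # fitted label encoder, and the integer-encoded action stream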
+ (perfspec['vars']['actions'],
+ perfspec['vars']['unique_actions'],
+ perfspec['vars']['label_encoder'],
+ perfspec['vars']['encoded_actions']
+ ) = lib_load_actions(
+ actions_path=perfspec['settings'].get('actions_filepath'),
+ verbose=None
+ )
+
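+    # resolve the configured input_str into a sequence of known actions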
+ perfspec['vars']['input_sequence'] = lib_get_input_sequence(
+ input_str=perfspec['settings']['input_str'],
+ unique_actions=perfspec['vars']['unique_actions']
+ )
+
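+    # helpers defined in the train_perfspec notebook: training preparation,
+    # model loading, evaluation, history plots, and the confusion matrix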
+ from train_perfspec import perfspec_prepare_model_train
+ (_,_defs) = perfspec_prepare_model_train.run()
+ lib_prepare_train = _defs['prepare_train']
+
+ from train_perfspec import perfspec_load_model_from_path
+ (_,_defs) = perfspec_load_model_from_path.run()
+ lib_load_model_from_path = _defs['load_model_from_path']
+
+ from train_perfspec import perfspec_evaluate_model
+ (_,_defs) = perfspec_evaluate_model.run()
+ lib_evaluate_model = _defs['evaluate_model']
+ lib_run_evaluate = _defs['run_evaluate']
+ lib_history_info = _defs['history_info']
+
+ from train_perfspec import perfspec_plot_history
+ (_,_defs) = perfspec_plot_history.run()
+ plot_history = _defs['plot_history']
+
+ from train_perfspec import perfspec_plot_defs
+ (_,_defs) = perfspec_plot_defs.run()
+ lib_plot_accuracy = _defs['plot_accuracy']
+ lib_plot_loss = _defs['plot_loss']
+ lib_plot_precision = _defs['plot_precision']
+
+ from train_perfspec import perfspec_define_confusion_matrix
+ (_,_defs) = perfspec_define_confusion_matrix.run()
+ lib_make_confusion_matrix = _defs['make_confusion_matrix']
+
+ mo.md(
+ f"""
+
+ {lib_intro(notebook_name)}
+
+ """
+ )
+ return (
+ lib_evaluate_model,
+ lib_get_input_sequence,
+ lib_history_info,
+ lib_load_actions,
+ lib_load_model_from_path,
+ lib_make_confusion_matrix,
+ lib_plot_accuracy,
+ lib_plot_loss,
+ lib_plot_precision,
+ lib_predict_action,
+ lib_prepare_train,
+ lib_run_evaluate,
+ perfspec_args,
+ perfspec_define_confusion_matrix,
+ perfspec_evaluate_model,
+ perfspec_input_sequence,
+ perfspec_load_actions,
+ perfspec_load_model_from_path,
+ perfspec_plot_defs,
+ perfspec_plot_history,
+ perfspec_predict,
+ perfspec_prepare_model_train,
+ plot_history,
+ verbose,
+ )
+
+
+@app.cell(hide_code=True)
+def perfspec_render_model_browser(mo, perfspec):
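+    # browse only saved .keras models under models_dirpath; a single file can
+    # be selected and navigation outside the initial path is disabled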
+ model_file_browser = mo.ui.file_browser(
+ initial_path=perfspec['defaults']['models_dirpath'],
+ multiple=False,
+ filetypes=['.keras'],
+ selection_mode='file',
+ restrict_navigation=True,
+ #label="Model",
+ )
+
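+    # returns the selected model path (values_only=True) or a display string,
+    # falling back to the default when nothing is selected or reset was clicked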
+    def parse_model_browse_selection_value(values_only=True):
+        if len(model_file_browser.value) > 0 and not reset_model_button.value:
+            if values_only:
+                return model_file_browser.value[0].path
+            return f"Selection: {model_file_browser.value[0].path}"
+        if values_only:
+            return ""
+        return "Using the default value"
+
+    reset_model_button = mo.ui.button(
+        label="Reset selected model",
+        kind="neutral",
+        value=False,
+        on_click=lambda value: not value,
+    )
+ return (
+ model_file_browser,
+ parse_model_browse_selection_value,
+ reset_model_button,
+ )
+
+
+@app.cell(hide_code=True)
+def model_selection_view(
+ mo,
+ model_file_browser,
+ parse_model_browse_selection_value,
+ reset_model_button,
+):
+ mo.md(
+ f"""
+ { mo.vstack(items=[
+ mo.md(""" ## Model Selection
+        #### Select a model or use the default
+ """),
+ model_file_browser,
+ mo.hstack(items=[
+ " ",
+ reset_model_button,
+ ]),
+ mo.hstack(items=[
+ mo.md(f"{parse_model_browse_selection_value(False)}"),
+ mo.md(f"reset: {reset_model_button.value}"),
+ ]),
+ ])
+ }
+ """
+ )
+ return
+
+
+@app.cell(hide_code=True)
+def settings(
+ Path,
+ lib_load_model_from_path,
+ mo,
+ parse_model_browse_selection_value,
+ perfspec,
+):
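+    # a file-browser selection overrides the default model path; the history
+    # file is expected to sit next to the selected model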
+ _model_filepath = parse_model_browse_selection_value(True)
+ if _model_filepath != "":
+ perfspec["settings"]["model_filepath"] = _model_filepath
+ perfspec["settings"]["model_history_filepath"] = (
+ Path(_model_filepath).parent / perfspec["defaults"]["history_path"]
+ )
+
+ if perfspec["settings"]["verbose"] != None or mo.running_in_notebook():
+ print(f"Model filepath: {perfspec['settings']['model_filepath']}")
+ print(
+ f"History filepath: {perfspec['settings']['model_history_filepath']}"
+ )
+
+
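+    # load the model once here; later cells reuse perfspec['vars']['model']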
+ perfspec["vars"]["model"] = lib_load_model_from_path(perfspec, None)
+ if perfspec["vars"]["model"] == None:
+ exit("No model loaded !")
+ return
+
+
+@app.cell(hide_code=True)
+def command_line_options(mo, notebook_name):
+ from lib_perfspec import perfspec_out_settings
+ (_,_defs) = perfspec_out_settings.run()
+ out_settings = _defs['out_settings']
+
+ mo.md(out_settings(notebook_name))
+ return out_settings, perfspec_out_settings
+
+
+@app.cell(hide_code=True)
+def cli_options(mo, notebook_name):
+ from lib_perfspec import perfspec_cli_ops
+ (_,_defs) = perfspec_cli_ops.run()
+ out_cli_ops = _defs['out_cli_ops']
+ mo.accordion({
+ "Mostrar command Line options ": out_cli_ops(notebook_name)
+ })
+ return out_cli_ops, perfspec_cli_ops
+
+
+@app.cell(hide_code=True)
+def define_load_trained_model(lib_load_model_from_path, mo, perfspec):
+ def load_trained_model():
+        if perfspec['vars']['model'] is None:
+            _verbose = "1" if mo.running_in_notebook() else perfspec['settings']['verbose']
+            perfspec['vars']['model'] = lib_load_model_from_path(perfspec, _verbose)
+        if perfspec['vars']['model'] is None:
+            print("No model loaded!")
+
+ mo.md(
+ r"""
+ ## Load trained model
+ """
+ )
+ return (load_trained_model,)
+
+
+@app.cell(hide_code=True)
+def model_summary(load_trained_model, mo, perfspec):
+    def show_model_summary():
+        load_trained_model()
+        if perfspec['vars']['model'] is not None:
+            perfspec['vars']['model'].summary()
+
+    show_model_summary()
+ mo.md(
+ f"""
+ ## Model Summary
+ """
+ )
+    return (show_model_summary,)
+
+
+@app.cell(hide_code=True)
+def evaluate_model(lib_history_info, lib_run_evaluate, mo, perfspec):
+    _evaluate_run = lib_run_evaluate(perfspec)
+    _history = lib_history_info(perfspec)
+ mo.md(
+ r"""
+ ## Evaluate Model
+ """
+ )
+ return
+
+
+@app.cell(hide_code=True)
+def model_history_info(lib_history_info, mo, perfspec):
+    _history = lib_history_info(perfspec)
+    mo.md(
+        f"""
+        ## Model history info
+
+ {mo.md(_history)}
+ """
+ )
+ return
+
+
+@app.cell(hide_code=True)
+def model_plot_accuracy(lib_plot_accuracy, mo, perfspec):
+    _plot_acc = lib_plot_accuracy(perfspec)
+
+    if perfspec['vars']['history'] is not None and mo.running_in_notebook():
+ _output = mo.as_html(_plot_acc.gcf())
+ else:
+ _output = None
+
+ mo.md(
+ f"""
+        ## Model accuracy history
+
+        Accuracy per epoch from the training history
+
+ {_output}
+
+ """
+ )
+ return
+
+
+@app.cell(hide_code=True)
+def model_plot_loss(lib_plot_loss, mo, perfspec):
+    _plot_loss = lib_plot_loss(perfspec)
+
+    if perfspec['vars']['history'] is not None and mo.running_in_notebook():
+ _output = mo.as_html(_plot_loss.gcf())
+ else:
+ _output = None
+
+ mo.md(
+ f"""
+        ## Model loss history
+
+        Loss per epoch from the training history
+
+ {_output}
+
+ """
+ )
+ return
+
+
+@app.cell(hide_code=True)
+def model_plot_precision(lib_plot_precision, mo, perfspec):
+ _plt_pre = lib_plot_precision(perfspec)
+ if _plt_pre is not None:
+ mo.md(
+ f"""
+
+            ## Model precision history
+
+            Precision per epoch from the training history
+
+ {mo.as_html(_plt_pre.gcf())}
+
+ """
+ )
+ return
+
+
+@app.cell
+def confusion_matrix(lib_load_model_from_path, lib_make_confusion_matrix, mo, perfspec):
+    if mo.running_in_notebook():
+        if perfspec['vars'].get('model') is None:
+            perfspec['vars']['model'] = lib_load_model_from_path(perfspec, perfspec['settings']['verbose'])
+        if perfspec['vars'].get('model') is not None:
+            lib_make_confusion_matrix(perfspec)
+
+ mo.md("### Confusion Matrix")
+ return
+
+
+@app.cell(hide_code=True)
+def title_show_values_prediction(mo):
+    mo.md(
+        """
+        ## Run Model Prediction
+
+        To explore interactively, use the **run_perfspec.py** notebook
+        """
+ ).callout('neutral')
+ return
+
+
+@app.cell(hide_code=True)
+def show_values_prediction(mo, perfspec):
+ def on_show_values_prediction():
+ if perfspec['settings']['sequence_length'] > 1 or len(perfspec['vars']['unique_actions']) == 0:
+ return ""
+ else:
+ return f"""
+ ## Show Values Prediction
+ """
+
+ mo.md(on_show_values_prediction())
+ return (on_show_values_prediction,)
+
+
+@app.cell(hide_code=True)
+def run_values_prediction(
+    lib_predict_action,
+    load_trained_model,
+    mo,
+    perfspec,
+):
+    def show_values_prediction():
+        # the table is only built for single-step sequences over a known vocabulary
+        if perfspec['settings']['sequence_length'] > 1 or len(perfspec['vars']['unique_actions']) == 0:
+            return None
+        import pandas as pd
+        if perfspec['vars']['model'] is None:
+            load_trained_model()
+        if perfspec['vars']['model'] is None:
+            return None
+        # predict the follow-up action for every known action
+        data = []
+        for act in perfspec['vars']['unique_actions']:
+            (_, prediction) = lib_predict_action(
+                perfspec['vars']['model'],
+                perfspec['settings']['sequence_length'],
+                [act],
+                perfspec['vars']['label_encoder'],
+                "-1"
+            )
+            data.append({"action": act, 'prediction': prediction['action'][0], 'value': prediction['max_value']})
+        df_res = pd.DataFrame(data)
+        return mo.ui.dataframe(df_res)
+
+    data_frame = show_values_prediction()
+    data_frame
+    return data_frame, show_values_prediction
+
+
+@app.cell
+def run_prediction(
+ lib_get_input_sequence,
+ lib_history_info,
+ lib_predict_action,
+ lib_run_evaluate,
+ mo,
+    show_model_summary,
+ perfspec,
+):
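+    # in verbose mode print the model summary, evaluation, and history, then
+    # predict the next action for the configured input sequence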
+    _verbose = "1" if mo.running_in_notebook() else perfspec['settings']['verbose']
+    _input_sequence = lib_get_input_sequence(perfspec['settings']['input_str'], perfspec['vars']['unique_actions'])
+    # initialize so the cell returns cleanly when there is no input sequence
+    encoded_input = None
+    predicted_probabilities = None
+    if len(_input_sequence) > 0:
+        if _verbose == "1":
+            show_model_summary()
+            lib_run_evaluate(perfspec)
+            lib_history_info(perfspec)
+            print("\nPrediction")
+
+        if perfspec['vars']['model'] is not None:
+            (encoded_input, predicted_probabilities) = lib_predict_action(
+                perfspec['vars']['model'],
+                perfspec['settings']['sequence_length'],
+                _input_sequence,
+                perfspec['vars']['label_encoder'],
+                _verbose
+            )
+        else:
+            print("No model found!")
+    return encoded_input, predicted_probabilities
+
+
+if __name__ == "__main__":
+ app.run()