Skip to content

toolboxv2 API Reference

This section provides an API reference for key components directly available from the toolboxv2 package.

Core Application & Tooling

toolboxv2.AppType

Source code in toolboxv2/utils/system/types.py
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
class AppType:
    """Abstract interface / type stub for the toolboxv2 ``App`` object.

    Most members carry the docstring ``"proxi attr"``: they are placeholders
    whose real implementation is supplied elsewhere (presumably by the
    concrete ``App`` class — confirm against the toolboxv2 runtime).  Only a
    few methods contain real logic: the idle loops, the ``tb`` decorator
    front-end, ``print_functions`` and the test-execution helpers at the
    bottom of the class.
    """

    prefix: str
    id: str
    # NOTE(review): mutable class-level defaults (dicts/lists below) are
    # shared across ALL instances of AppType — confirm this is intentional.
    globals: dict[str, Any] = {"root": dict, }
    locals: dict[str, Any] = {"user": {'app': "self"}, }

    local_test: bool = False
    start_dir: str
    data_dir: str
    config_dir: str
    info_dir: str
    is_server:bool = False

    logger: logging.Logger
    logging_filename: str

    api_allowed_mods_list: list[str] = []

    version: str
    loop: asyncio.AbstractEventLoop

    # Fixed-width (10-char, '~'-padded) key prefixes for persisted config
    # entries — presumably consumed by the FileHandler; verify.
    keys: dict[str, str] = {
        "MACRO": "macro~~~~:",
        "MACRO_C": "m_color~~:",
        "HELPER": "helper~~~:",
        "debug": "debug~~~~:",
        "id": "name-spa~:",
        "st-load": "mute~load:",
        "comm-his": "comm-his~:",
        "develop-mode": "dev~mode~:",
        "provider::": "provider::",
    }

    # Expected default type (or literal default value) per persisted key;
    # mirrors the ``keys`` table above.
    defaults: dict[str, (bool or dict or dict[str, dict[str, str]] or str or list[str] or list[list]) | None] = {
        "MACRO": list[str],
        "MACRO_C": dict,
        "HELPER": dict,
        "debug": str,
        "id": str,
        "st-load": False,
        "comm-his": list[list],
        "develop-mode": bool,
    }

    cluster_manager: ClusterManager
    root_blob_storage: BlobStorage
    config_fh: FileHandler
    _debug: bool
    flows: dict[str, Callable]
    dev_modi: bool
    functions: dict[str, Any]
    modules: dict[str, Any]

    interface_type: ToolBoxInterfaces
    REFIX: str

    alive: bool
    called_exit: tuple[bool, float]
    args_sto: AppArgs
    system_flag = None
    session = None
    appdata = None
    exit_tasks = []

    enable_profiling: bool = False
    sto = None

    websocket_handlers: dict[str, dict[str, Callable]] = {}
    _rust_ws_bridge: Any = None

    def __init__(self, prefix: None | str= None, args: AppArgs | None = None):
        # Only stores the CLI args and instance prefix; all other state is
        # populated by the concrete implementation ("proxi attr").
        self.args_sto = args
        self.prefix = prefix
        """proxi attr"""

    def start_server(self):
        # Starts the backing API server unless one is already flagged as
        # running.
        # NOTE(review): the guard checks ``is_server`` but the flag is set to
        # False after a successful start — looks like it should be True;
        # confirm.
        from toolboxv2.utils.system.api import manage_server
        if self.is_server:
            return
        manage_server("start")
        self.is_server = False

    @staticmethod
    def exit_main(*args, **kwargs):
        """proxi attr"""

    @staticmethod
    async def hide_console(*args, **kwargs):
        """proxi attr"""

    @staticmethod
    async def show_console(*args, **kwargs):
        """proxi attr"""

    @staticmethod
    async def disconnect(*args, **kwargs):
        """proxi attr"""

    def set_logger(self, debug=False):
        """proxi attr"""

    @property
    def debug(self):
        """proxi attr"""
        return self._debug

    def debug_rains(self, e):
        """proxi attr"""

    def set_flows(self, r):
        """proxi attr"""

    def run_flows(self, name, **kwargs):
        """proxi attr"""

    def rrun_flows(self, name, **kwargs):
        """proxi attr"""

    def idle(self):
        # Blocking keep-alive loop: sleeps until ``self.alive`` goes False or
        # the user hits Ctrl-C.
        import time
        self.print("idle")
        try:
            while self.alive:
                time.sleep(1)
        except KeyboardInterrupt:
            pass
        self.print("idle done")

    async def a_idle(self):
        # Async keep-alive: prefers serving an attached daemon_app, otherwise
        # polls ``self.alive`` once per second.
        self.print("a idle")
        try:
            if hasattr(self, 'daemon_app'):
                self.print("serving daemon")
                await self.daemon_app.connect(self)
            else:
                self.print("serving default")
                while self.alive:
                    await asyncio.sleep(1)
        except KeyboardInterrupt:
            pass
        self.print("a idle done")

    @debug.setter
    def debug(self, value):
        """proxi attr"""

    def _coppy_mod(self, content, new_mod_dir, mod_name, file_type='py'):
        """proxi attr"""

    def _pre_lib_mod(self, mod_name, path_to="./runtime", file_type='py'):
        """proxi attr"""

    def _copy_load(self, mod_name, file_type='py', **kwargs):
        """proxi attr"""

    def inplace_load_instance(self, mod_name, loc="toolboxv2.mods.", spec='app', save=True):
        """proxi attr"""

    def save_instance(self, instance, modular_id, spec='app', instance_type="file/application", tools_class=None):
        """proxi attr"""

    def save_initialized_module(self, tools_class, spec):
        """proxi attr"""

    def mod_online(self, mod_name, installed=False):
        """proxi attr"""

    def _get_function(self,
                      name: Enum or None,
                      state: bool = True,
                      specification: str = "app",
                      metadata=False, as_str: tuple or None = None, r=0):
        """proxi attr"""

    def save_exit(self):
        """proxi attr"""

    def load_mod(self, mod_name: str, mlm='I', **kwargs):
        """proxi attr"""

    async def init_module(self, modular):
        # Thin async alias for load_mod.
        return await self.load_mod(modular)

    async def load_external_mods(self):
        """proxi attr"""

    async def load_all_mods_in_file(self, working_dir="mods"):
        """proxi attr"""

    def get_all_mods(self, working_dir="mods", path_to="./runtime"):
        """proxi attr"""

    def remove_all_modules(self, delete=False):
        # Iterates over a snapshot of the keys since remove_mod mutates
        # ``self.functions``.
        for mod in list(self.functions.keys()):
            self.logger.info(f"closing: {mod}")
            self.remove_mod(mod, delete=delete)

    async def a_remove_all_modules(self, delete=False):
        # Async counterpart of remove_all_modules.
        for mod in list(self.functions.keys()):
            self.logger.info(f"closing: {mod}")
            await self.a_remove_mod(mod, delete=delete)

    def print_ok(self):
        """proxi attr"""
        self.logger.info("OK")

    def reload_mod(self, mod_name, spec='app', is_file=True, loc="toolboxv2.mods."):
        """proxi attr"""

    def watch_mod(self, mod_name, spec='app', loc="toolboxv2.mods.", use_thread=True, path_name=None):
        """proxi attr"""

    def remove_mod(self, mod_name, spec='app', delete=True):
        """proxi attr"""

    async def a_remove_mod(self, mod_name, spec='app', delete=True):
        """proxi attr"""

    def exit(self):
        """proxi attr"""

    def web_context(self) -> str:
        """returns the build index ( toolbox web component )"""

    async def a_exit(self):
        """proxi attr"""

    def save_load(self, modname, spec='app'):
        """proxi attr"""

    def get_function(self, name: Enum or tuple, **kwargs):
        """
        Kwargs for _get_function
            metadata:: return the registered function dictionary
                stateless: (function_data, None), 0
                stateful: (function_data, higher_order_function), 0
            state::boolean
                specification::str default app
        """

    def run_a_from_sync(self, function, *args):
        """
        run an async function from sync code
        """

    def run_bg_task_advanced(self, task, *args, **kwargs):
        """
        proxi attr
        """

    def wait_for_bg_tasks(self, timeout=None):
        """
        proxi attr
        """

    def run_bg_task(self, task):
        """
                run an async function in the background
                """
    def run_function(self, mod_function_name: Enum or tuple,
                     tb_run_function_with_state=True,
                     tb_run_with_specification='app',
                     args_=None,
                     kwargs_=None,
                     *args,
                     **kwargs) -> Result:

        """proxi attr"""

    async def a_run_function(self, mod_function_name: Enum or tuple,
                             tb_run_function_with_state=True,
                             tb_run_with_specification='app',
                             args_=None,
                             kwargs_=None,
                             *args,
                             **kwargs) -> Result:

        """proxi attr"""

    def fuction_runner(self, function, function_data: dict, args: list, kwargs: dict, t0=.0):
        """
        parameters = function_data.get('params')
        modular_name = function_data.get('module_name')
        function_name = function_data.get('func_name')
        mod_function_name = f"{modular_name}.{function_name}"

        proxi attr
        """

    async def a_fuction_runner(self, function, function_data: dict, args: list, kwargs: dict):
        """
        parameters = function_data.get('params')
        modular_name = function_data.get('module_name')
        function_name = function_data.get('func_name')
        mod_function_name = f"{modular_name}.{function_name}"

        proxi attr
        """

    async def run_http(self, mod_function_name: Enum or str or tuple, function_name=None, method="GET",
                       args_=None,
                       kwargs_=None,
                       *args, **kwargs):
        """run a function remote via http / https"""

    def run_any(self, mod_function_name: Enum or str or tuple, backwords_compability_variabel_string_holder=None,
                get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                kwargs_=None,
                *args, **kwargs):
        """proxi attr"""

    async def a_run_any(self, mod_function_name: Enum or str or tuple,
                        backwords_compability_variabel_string_holder=None,
                        get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                        kwargs_=None,
                        *args, **kwargs):
        """proxi attr"""

    def get_mod(self, name, spec='app') -> ModuleType or MainToolType:
        """proxi attr"""

    @staticmethod
    def print(text, *args, **kwargs):
        """proxi attr"""

    @staticmethod
    def sprint(text, *args, **kwargs):
        """proxi attr"""

    # ----------------------------------------------------------------
    # Decorators for the toolbox

    def _register_function(self, module_name, func_name, data):
        """proxi attr"""

    def _create_decorator(self, type_: str,
                          name: str = "",
                          mod_name: str = "",
                          level: int = -1,
                          restrict_in_virtual_mode: bool = False,
                          api: bool = False,
                          helper: str = "",
                          version: str or None = None,
                          initial=False,
                          exit_f=False,
                          test=True,
                          samples=None,
                          state=None,
                          pre_compute=None,
                          post_compute=None,
                          memory_cache=False,
                          file_cache=False,
                          row=False,
                          request_as_kwarg=False,
                          memory_cache_max_size=100,
                          memory_cache_ttl=300,
                          websocket_handler: str | None = None,):
        """proxi attr"""

        # data = {
        #     "type": type_,
        #     "module_name": module_name,
        #     "func_name": func_name,
        #     "level": level,
        #     "restrict_in_virtual_mode": restrict_in_virtual_mode,
        #     "func": func,
        #     "api": api,
        #     "helper": helper,
        #     "version": version,
        #     "initial": initial,
        #     "exit_f": exit_f,
        #     "__module__": func.__module__,
        #     "signature": sig,
        #     "params": params,
        #     "state": (
        #         False if len(params) == 0 else params[0] in ['self', 'state', 'app']) if state is None else state,
        #     "do_test": test,
        #     "samples": samples,
        #     "request_as_kwarg": request_as_kwarg,

    def tb(self, name=None,
           mod_name: str = "",
           helper: str = "",
           version: str or None = None,
           test: bool = True,
           restrict_in_virtual_mode: bool = False,
           api: bool = False,
           initial: bool = False,
           exit_f: bool = False,
           test_only: bool = False,
           memory_cache: bool = False,
           file_cache: bool = False,
           row=False,
           request_as_kwarg: bool = False,
           state: bool or None = None,
           level: int = 0,
           memory_cache_max_size: int = 100,
           memory_cache_ttl: int = 300,
           samples: list or dict or None = None,
           interface: ToolBoxInterfaces or None or str = None,
           pre_compute=None,
           post_compute=None,
           api_methods=None,
           websocket_handler: str | None = None,
           ):
        """
    A decorator for registering and configuring functions within a module.

    This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

    Args:
        name (str, optional): The name to register the function under. Defaults to the function's own name.
        mod_name (str, optional): The name of the module the function belongs to.
        helper (str, optional): A helper string providing additional information about the function.
        version (str or None, optional): The version of the function or module.
        test (bool, optional): Flag to indicate if the function is for testing purposes.
        restrict_in_virtual_mode (bool, optional): Flag to restrict the function in virtual mode.
        api (bool, optional): Flag to indicate if the function is part of an API.
        initial (bool, optional): Flag to indicate if the function should be executed at initialization.
        exit_f (bool, optional): Flag to indicate if the function should be executed at exit.
        test_only (bool, optional): Flag to indicate if the function should only be used for testing.
        memory_cache (bool, optional): Flag to enable memory caching for the function.
        request_as_kwarg (bool, optional): Flag to pass the request as a kwarg if the function is called from the api.
        file_cache (bool, optional): Flag to enable file caching for the function.
        row (bool, optional): rather to auto wrap the result in Result type default False means no row data aka result type
        state (bool or None, optional): Flag to indicate if the function maintains state.
        level (int, optional): The level of the function, used for prioritization or categorization.
        memory_cache_max_size (int, optional): Maximum size of the memory cache.
        memory_cache_ttl (int, optional): Time-to-live for the memory cache entries.
        samples (list or dict or None, optional): Samples or examples of function usage.
        interface (str, optional): The interface type for the function.
        pre_compute (callable, optional): A function to be called before the main function.
        post_compute (callable, optional): A function to be called after the main function.
        api_methods (list[str], optional): default ["AUTO"] (GET if not params, POST if params) , GET, POST, PUT or DELETE.

    Returns:
        function: The decorated function with additional processing and registration capabilities.
    """
        if interface is None:
            interface = "tb"
        # In non-test app instances, test-only functions become inert no-ops.
        if test_only and 'test' not in self.id:
            return lambda *args, **kwargs: args
        # NOTE(review): ``websocket_handler`` and ``api_methods`` are accepted
        # above but never forwarded to _create_decorator — confirm whether
        # they should be passed through.
        return self._create_decorator(interface,
                                      name,
                                      mod_name,
                                      level=level,
                                      restrict_in_virtual_mode=restrict_in_virtual_mode,
                                      helper=helper,
                                      api=api,
                                      version=version,
                                      initial=initial,
                                      exit_f=exit_f,
                                      test=test,
                                      samples=samples,
                                      state=state,
                                      pre_compute=pre_compute,
                                      post_compute=post_compute,
                                      memory_cache=memory_cache,
                                      file_cache=file_cache,
                                      row=row,
                                      request_as_kwarg=request_as_kwarg,
                                      memory_cache_max_size=memory_cache_max_size,
                                      memory_cache_ttl=memory_cache_ttl)

    def print_functions(self, name=None):
        """Pretty-print the registered function table, optionally for a
        single module ``name`` only."""

        if not self.functions:
            print("Nothing to see")
            return

        def helper(_functions):
            # Print one line per registered function; non-dict entries are
            # module metadata (e.g. app_instance_type) and are skipped.
            for func_name, data in _functions.items():
                if not isinstance(data, dict):
                    continue

                func_type = data.get('type', 'Unknown')
                # level -1 is rendered as 'r' (presumably "root"; confirm).
                func_level = 'r' if data['level'] == -1 else data['level']
                api_status = 'Api' if data.get('api', False) else 'Non-Api'

                print(f"  Function: {func_name}{data.get('signature', '()')}; "
                      f"Type: {func_type}, Level: {func_level}, {api_status}")

        if name is not None:
            functions = self.functions.get(name)
            if functions is not None:
                print(f"\nModule: {name}; Type: {functions.get('app_instance_type', 'Unknown')}")
                helper(functions)
                return
        for module, functions in self.functions.items():
            print(f"\nModule: {module}; Type: {functions.get('app_instance_type', 'Unknown')}")
            helper(functions)

    def save_autocompletion_dict(self):
        """proxi attr"""

    def get_autocompletion_dict(self):
        """proxi attr"""

    def get_username(self, get_input=False, default="loot") -> str:
        """proxi attr"""

    def save_registry_as_enums(self, directory: str, filename: str):
        """proxi attr"""

    async def execute_all_functions_(self, m_query='', f_query=''):
        """Sequentially run every registered function whose module/function
        name matches the given prefixes and collect success/error/coverage
        statistics (legacy serial variant of :meth:`execute_all_functions`)."""
        print("Executing all functions")
        from ..extras import generate_test_cases
        all_data = {
            "modular_run": 0,
            "modular_fatal_error": 0,
            "errors": 0,
            "modular_sug": 0,
            "coverage": [],
            "total_coverage": {},
        }
        # Snapshot, since executed functions may mutate self.functions.
        items = list(self.functions.items()).copy()
        for module_name, functions in items:
            infos = {
                "functions_run": 0,
                "functions_fatal_error": 0,
                "error": 0,
                "functions_sug": 0,
                'calls': {},
                'callse': {},
                "coverage": [0, 0],
            }
            all_data['modular_run'] += 1
            if not module_name.startswith(m_query):
                all_data['modular_sug'] += 1
                continue

            with Spinner(message=f"In {module_name}| "):
                f_items = list(functions.items()).copy()
                for function_name, function_data in f_items:
                    if not isinstance(function_data, dict):
                        continue
                    if not function_name.startswith(f_query):
                        continue
                    test: list = function_data.get('do_test')
                    # print(test, module_name, function_name, function_data)
                    infos["coverage"][0] += 1
                    # do_test=False marks functions explicitly excluded from
                    # the test run.
                    if test is False:
                        continue

                    with Spinner(message=f"\t\t\t\t\t\tfuction {function_name}..."):
                        params: list = function_data.get('params')
                        sig: signature = function_data.get('signature')
                        state: bool = function_data.get('state')
                        samples: bool = function_data.get('samples')

                        test_kwargs_list = [{}]

                        if params is not None:
                            # Prefer registered samples, otherwise synthesize
                            # kwargs from the signature.
                            test_kwargs_list = samples if samples is not None else generate_test_cases(sig=sig)
                            # print(test_kwargs)
                            # print(test_kwargs[0])
                            # test_kwargs = test_kwargs_list[0]
                        # print(module_name, function_name, test_kwargs_list)
                        infos["coverage"][1] += 1
                        for test_kwargs in test_kwargs_list:
                            try:
                                # print(f"test Running {state=} |{module_name}.{function_name}")
                                result = await self.a_run_function((module_name, function_name),
                                                                   tb_run_function_with_state=state,
                                                                   **test_kwargs)
                                if not isinstance(result, Result):
                                    result = Result.ok(result)
                                if result.info.exec_code == 0:
                                    infos['calls'][function_name] = [test_kwargs, str(result)]
                                    infos['functions_sug'] += 1
                                else:
                                    # NOTE(review): non-zero exec_code still
                                    # increments functions_sug (plus error) —
                                    # confirm this double-count is intended.
                                    infos['functions_sug'] += 1
                                    infos['error'] += 1
                                    infos['callse'][function_name] = [test_kwargs, str(result)]
                            except Exception as e:
                                infos['functions_fatal_error'] += 1
                                infos['callse'][function_name] = [test_kwargs, str(e)]
                            finally:
                                infos['functions_run'] += 1

                if infos['functions_run'] == infos['functions_sug']:
                    all_data['modular_sug'] += 1
                else:
                    all_data['modular_fatal_error'] += 1
                if infos['error'] > 0:
                    all_data['errors'] += infos['error']

                all_data[module_name] = infos
                if infos['coverage'][0] == 0:
                    c = 0
                else:
                    c = infos['coverage'][1] / infos['coverage'][0]
                all_data["coverage"].append(f"{module_name}:{c:.2f}\n")
        total_coverage = sum([float(t.split(":")[-1]) for t in all_data["coverage"]]) / len(all_data["coverage"])
        print(
            f"\n{all_data['modular_run']=}\n{all_data['modular_sug']=}\n{all_data['modular_fatal_error']=}\n{total_coverage=}")
        d = analyze_data(all_data)
        return Result.ok(data=all_data, data_info=d)

    async def execute_function_test(self, module_name: str, function_name: str,
                                    function_data: dict, test_kwargs: dict,
                                    profiler: cProfile.Profile) -> tuple[bool, str, dict, float]:
        """Run a single test invocation and return
        ``(success, result_str, test_kwargs, execution_time)``; exceptions
        are captured as a failed result rather than propagated."""
        start_time = time.time()
        with profile_section(profiler, hasattr(self, 'enable_profiling') and self.enable_profiling):
            try:
                result = await self.a_run_function(
                    (module_name, function_name),
                    tb_run_function_with_state=function_data.get('state'),
                    **test_kwargs
                )

                if not isinstance(result, Result):
                    result = Result.ok(result)

                success = result.info.exec_code == 0
                execution_time = time.time() - start_time
                return success, str(result), test_kwargs, execution_time
            except Exception as e:
                execution_time = time.time() - start_time
                return False, str(e), test_kwargs, execution_time

    async def process_function(self, module_name: str, function_name: str,
                               function_data: dict, profiler: cProfile.Profile) -> tuple[str, ModuleInfo]:
        """Run all test cases for one function concurrently and aggregate the
        outcomes into a :class:`ModuleInfo`."""
        start_time = time.time()
        info = ModuleInfo()

        with profile_section(profiler, hasattr(self, 'enable_profiling') and self.enable_profiling):
            if not isinstance(function_data, dict):
                return function_name, info

            test = function_data.get('do_test')
            info.coverage[0] += 1

            if test is False:
                return function_name, info

            params = function_data.get('params')
            sig = function_data.get('signature')
            samples = function_data.get('samples')

            test_kwargs_list = [{}] if params is None else (
                samples if samples is not None else generate_test_cases(sig=sig)
            )

            info.coverage[1] += 1

            # Create tasks for all test cases
            tasks = [
                self.execute_function_test(module_name, function_name, function_data, test_kwargs, profiler)
                for test_kwargs in test_kwargs_list
            ]

            # Execute all tests concurrently
            results = await asyncio.gather(*tasks)

            total_execution_time = 0
            for success, result_str, test_kwargs, execution_time in results:
                info.functions_run += 1
                total_execution_time += execution_time

                if success:
                    info.functions_sug += 1
                    info.calls[function_name] = [test_kwargs, result_str]
                else:
                    # NOTE(review): failures increment functions_sug as well
                    # (mirrors execute_all_functions_) — confirm intended.
                    info.functions_sug += 1
                    info.error += 1
                    info.callse[function_name] = [test_kwargs, result_str]

            info.execution_time = time.time() - start_time
            return function_name, info

    async def process_module(self, module_name: str, functions: dict,
                             f_query: str, profiler: cProfile.Profile) -> tuple[str, ModuleInfo]:
        """Test every function of one module concurrently and merge the
        per-function results into a single :class:`ModuleInfo`."""
        start_time = time.time()

        with profile_section(profiler, hasattr(self, 'enable_profiling') and self.enable_profiling):
            # NOTE(review): the semaphore is acquired once around the whole
            # gather, so it does not actually limit task concurrency — confirm
            # whether per-task acquisition was intended.
            async with asyncio.Semaphore(mp.cpu_count()):
                tasks = [
                    self.process_function(module_name, fname, fdata, profiler)
                    for fname, fdata in functions.items()
                    if fname.startswith(f_query)
                ]

                if not tasks:
                    return module_name, ModuleInfo()

                results = await asyncio.gather(*tasks)

                # Combine results from all functions in the module
                combined_info = ModuleInfo()
                total_execution_time = 0

                for _, info in results:
                    combined_info.functions_run += info.functions_run
                    combined_info.functions_fatal_error += info.functions_fatal_error
                    combined_info.error += info.error
                    combined_info.functions_sug += info.functions_sug
                    combined_info.calls.update(info.calls)
                    combined_info.callse.update(info.callse)
                    combined_info.coverage[0] += info.coverage[0]
                    combined_info.coverage[1] += info.coverage[1]
                    total_execution_time += info.execution_time

                combined_info.execution_time = time.time() - start_time
                return module_name, combined_info

    async def execute_all_functions(self, m_query='', f_query='', enable_profiling=True):
        """
        Execute all functions with parallel processing and optional profiling.

        Args:
            m_query (str): Module name query filter
            f_query (str): Function name query filter
            enable_profiling (bool): Enable detailed profiling information
        """
        print("Executing all functions in parallel" + (" with profiling" if enable_profiling else ""))

        start_time = time.time()
        stats = ExecutionStats()
        items = list(self.functions.items()).copy()

        # Set up profiling
        self.enable_profiling = enable_profiling
        profiler = cProfile.Profile()

        with profile_section(profiler, enable_profiling):
            # Filter modules based on query
            filtered_modules = [
                (mname, mfuncs) for mname, mfuncs in items
                if mname.startswith(m_query)
            ]

            stats.modular_run = len(filtered_modules)

            # Process all modules concurrently
            # NOTE(review): as in process_module, this semaphore wraps the
            # whole gather and limits nothing — confirm intent.
            async with asyncio.Semaphore(mp.cpu_count()):
                tasks = [
                    self.process_module(mname, mfuncs, f_query, profiler)
                    for mname, mfuncs in filtered_modules
                ]

                results = await asyncio.gather(*tasks)

            # Combine results and calculate statistics
            for module_name, info in results:
                if info.functions_run == info.functions_sug:
                    stats.modular_sug += 1
                else:
                    stats.modular_fatal_error += 1

                stats.errors += info.error

                # Calculate coverage
                coverage = (info.coverage[1] / info.coverage[0]) if info.coverage[0] > 0 else 0
                stats.coverage.append(f"{module_name}:{coverage:.2f}\n")

                # Store module info
                stats.__dict__[module_name] = info

            # Calculate total coverage
            total_coverage = (
                sum(float(t.split(":")[-1]) for t in stats.coverage) / len(stats.coverage)
                if stats.coverage else 0
            )

            stats.total_execution_time = time.time() - start_time

            # Generate profiling stats if enabled
            if enable_profiling:
                s = io.StringIO()
                ps = pstats.Stats(profiler, stream=s).sort_stats('cumulative')
                ps.print_stats()
                stats.profiling_data = {
                    'detailed_stats': s.getvalue(),
                    'total_time': stats.total_execution_time,
                    'function_count': stats.modular_run,
                    'successful_functions': stats.modular_sug
                }

            print(
                f"\n{stats.modular_run=}"
                f"\n{stats.modular_sug=}"
                f"\n{stats.modular_fatal_error=}"
                f"\n{total_coverage=}"
                f"\nTotal execution time: {stats.total_execution_time:.2f}s"
            )

            if enable_profiling:
                print("\nProfiling Summary:")
                print(f"{'=' * 50}")
                print("Top 10 time-consuming functions:")
                ps.print_stats(10)

            analyzed_data = analyze_data(stats.__dict__)
            return Result.ok(data=stats.__dict__, data_info=analyzed_data)

debug property writable

proxi attr

prefix = prefix instance-attribute

proxi attr

a_exit() async

proxi attr

Source code in toolboxv2/utils/system/types.py
1426
1427
async def a_exit(self):
    """Async shutdown hook. Proxy attr — interface stub; implemented by the concrete App."""

a_fuction_runner(function, function_data, args, kwargs) async

parameters = function_data.get('params') modular_name = function_data.get('module_name') function_name = function_data.get('func_name') mod_function_name = f"{modular_name}.{function_name}"

proxi attr

Source code in toolboxv2/utils/system/types.py
1491
1492
1493
1494
1495
1496
1497
1498
1499
async def a_fuction_runner(self, function, function_data: dict, args: list, kwargs: dict):
    """Async function runner. Proxy attr — interface stub; implemented by the concrete App.

    The name keeps the original ("fuction") spelling for interface compatibility.

    The implementation reads from ``function_data``::

        parameters = function_data.get('params')
        modular_name = function_data.get('module_name')
        function_name = function_data.get('func_name')
        mod_function_name = f"{modular_name}.{function_name}"
    """

a_remove_mod(mod_name, spec='app', delete=True) async

proxi attr

Source code in toolboxv2/utils/system/types.py
1417
1418
async def a_remove_mod(self, mod_name, spec='app', delete=True):
    """Async variant of remove_mod. Proxy attr — interface stub; implemented by the concrete App."""

a_run_any(mod_function_name, backwords_compability_variabel_string_holder=None, get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs) async

proxi attr

Source code in toolboxv2/utils/system/types.py
1513
1514
1515
1516
1517
1518
async def a_run_any(self, mod_function_name: Enum or str or tuple,
                    backwords_compability_variabel_string_holder=None,
                    get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                    kwargs_=None,
                    *args, **kwargs):
    """Async variant of run_any. Proxy attr — interface stub; implemented by the concrete App."""

a_run_function(mod_function_name, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs) async

proxi attr

Source code in toolboxv2/utils/system/types.py
1471
1472
1473
1474
1475
1476
1477
1478
1479
async def a_run_function(self, mod_function_name: Enum or tuple,
                         tb_run_function_with_state=True,
                         tb_run_with_specification='app',
                         args_=None,
                         kwargs_=None,
                         *args,
                         **kwargs) -> Result:

    """Async variant of run_function. Proxy attr — interface stub; implemented by the concrete App."""

debug_rains(e)

proxi attr

Source code in toolboxv2/utils/system/types.py
1308
1309
def debug_rains(self, e):
    """Debug helper taking an exception ``e``. Proxy attr — interface stub; implemented by the concrete App."""

disconnect(*args, **kwargs) async staticmethod

proxi attr

Source code in toolboxv2/utils/system/types.py
1296
1297
1298
@staticmethod
async def disconnect(*args, **kwargs):
    """Proxy attr — interface stub; implemented by the concrete App."""

execute_all_functions(m_query='', f_query='', enable_profiling=True) async

Execute all functions with parallel processing and optional profiling.

Parameters:

Name Type Description Default
m_query str

Module name query filter

''
f_query str

Function name query filter

''
enable_profiling bool

Enable detailed profiling information

True
Source code in toolboxv2/utils/system/types.py
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
async def execute_all_functions(self, m_query='', f_query='', enable_profiling=True):
    """
    Execute all registered functions with parallel processing and optional profiling.

    Args:
        m_query (str): Module name query filter (prefix match on the module name).
        f_query (str): Function name query filter, forwarded to process_module.
        enable_profiling (bool): Enable detailed cProfile-based profiling output.

    Returns:
        Result: ok-Result whose ``data`` is the stats ``__dict__`` and whose
        ``data_info`` is the ``analyze_data`` summary of it.
    """
    print("Executing all functions in parallel" + (" with profiling" if enable_profiling else ""))

    start_time = time.time()
    stats = ExecutionStats()
    items = list(self.functions.items()).copy()

    # Set up profiling
    self.enable_profiling = enable_profiling
    profiler = cProfile.Profile()

    with profile_section(profiler, enable_profiling):
        # Filter modules based on query
        filtered_modules = [
            (mname, mfuncs) for mname, mfuncs in items
            if mname.startswith(m_query)
        ]

        stats.modular_run = len(filtered_modules)

        # Bound concurrency to the CPU count.  The semaphore must be acquired
        # once per task: the previous `async with asyncio.Semaphore(...)`
        # wrapped around the whole gather took only a single permit and
        # therefore imposed no limit at all.
        semaphore = asyncio.Semaphore(mp.cpu_count())

        async def _bounded_process(mname, mfuncs):
            # Helper: run one module's functions while holding a permit.
            async with semaphore:
                return await self.process_module(mname, mfuncs, f_query, profiler)

        tasks = [
            _bounded_process(mname, mfuncs)
            for mname, mfuncs in filtered_modules
        ]

        results = await asyncio.gather(*tasks)

        # Combine results and calculate statistics
        for module_name, info in results:
            if info.functions_run == info.functions_sug:
                stats.modular_sug += 1
            else:
                stats.modular_fatal_error += 1

            stats.errors += info.error

            # Calculate coverage (guard against empty modules)
            coverage = (info.coverage[1] / info.coverage[0]) if info.coverage[0] > 0 else 0
            stats.coverage.append(f"{module_name}:{coverage:.2f}\n")

            # Store module info
            stats.__dict__[module_name] = info

        # Calculate total coverage
        total_coverage = (
            sum(float(t.split(":")[-1]) for t in stats.coverage) / len(stats.coverage)
            if stats.coverage else 0
        )

        stats.total_execution_time = time.time() - start_time

        # Generate profiling stats if enabled
        if enable_profiling:
            s = io.StringIO()
            ps = pstats.Stats(profiler, stream=s).sort_stats('cumulative')
            ps.print_stats()
            stats.profiling_data = {
                'detailed_stats': s.getvalue(),
                'total_time': stats.total_execution_time,
                'function_count': stats.modular_run,
                'successful_functions': stats.modular_sug
            }

        print(
            f"\n{stats.modular_run=}"
            f"\n{stats.modular_sug=}"
            f"\n{stats.modular_fatal_error=}"
            f"\n{total_coverage=}"
            f"\nTotal execution time: {stats.total_execution_time:.2f}s"
        )

        if enable_profiling:
            print("\nProfiling Summary:")
            print(f"{'=' * 50}")
            print("Top 10 time-consuming functions:")
            ps.print_stats(10)

        analyzed_data = analyze_data(stats.__dict__)
        return Result.ok(data=stats.__dict__, data_info=analyzed_data)

exit()

proxi attr

Source code in toolboxv2/utils/system/types.py
1420
1421
def exit(self):
    """Synchronous shutdown hook. Proxy attr — interface stub; implemented by the concrete App."""

exit_main(*args, **kwargs) staticmethod

proxi attr

Source code in toolboxv2/utils/system/types.py
1284
1285
1286
@staticmethod
def exit_main(*args, **kwargs):
    """Proxy attr — interface stub; implemented by the concrete App."""

fuction_runner(function, function_data, args, kwargs, t0=0.0)

parameters = function_data.get('params') modular_name = function_data.get('module_name') function_name = function_data.get('func_name') mod_function_name = f"{modular_name}.{function_name}"

proxi attr

Source code in toolboxv2/utils/system/types.py
1481
1482
1483
1484
1485
1486
1487
1488
1489
def fuction_runner(self, function, function_data: dict, args: list, kwargs: dict, t0=.0):
    """Sync function runner. Proxy attr — interface stub; implemented by the concrete App.

    The name keeps the original ("fuction") spelling for interface compatibility.

    The implementation reads from ``function_data``::

        parameters = function_data.get('params')
        modular_name = function_data.get('module_name')
        function_name = function_data.get('func_name')
        mod_function_name = f"{modular_name}.{function_name}"
    """

get_all_mods(working_dir='mods', path_to='./runtime')

proxi attr

Source code in toolboxv2/utils/system/types.py
1391
1392
def get_all_mods(self, working_dir="mods", path_to="./runtime"):
    """Proxy attr — interface stub; implemented by the concrete App."""

get_autocompletion_dict()

proxi attr

Source code in toolboxv2/utils/system/types.py
1698
1699
def get_autocompletion_dict(self):
    """Proxy attr — interface stub; implemented by the concrete App."""

get_function(name, **kwargs)

Kwargs for _get_function metadata:: return the registered function dictionary stateless: (function_data, None), 0 stateful: (function_data, higher_order_function), 0 state::boolean specification::str default app

Source code in toolboxv2/utils/system/types.py
1432
1433
1434
1435
1436
1437
1438
1439
1440
def get_function(self, name: Enum or tuple, **kwargs):
    """Look up a registered function. Interface stub; implemented by the concrete App.

    Kwargs forwarded to ``_get_function``:
        metadata (bool): return the registered function dictionary;
            stateless functions yield ``(function_data, None), 0``,
            stateful functions yield ``(function_data, higher_order_function), 0``.
        state (bool): whether to resolve with instance state.
        specification (str): target specification, defaults to ``"app"``.
    """

get_mod(name, spec='app')

proxi attr

Source code in toolboxv2/utils/system/types.py
1520
1521
def get_mod(self, name, spec='app') -> ModuleType or MainToolType:
    """Proxy attr — interface stub; implemented by the concrete App."""

get_username(get_input=False, default='loot')

proxi attr

Source code in toolboxv2/utils/system/types.py
1701
1702
def get_username(self, get_input=False, default="loot") -> str:
    """Proxy attr — interface stub; implemented by the concrete App."""

hide_console(*args, **kwargs) async staticmethod

proxi attr

Source code in toolboxv2/utils/system/types.py
1288
1289
1290
@staticmethod
async def hide_console(*args, **kwargs):
    """Proxy attr — interface stub; implemented by the concrete App."""

inplace_load_instance(mod_name, loc='toolboxv2.mods.', spec='app', save=True)

proxi attr

Source code in toolboxv2/utils/system/types.py
1357
1358
def inplace_load_instance(self, mod_name, loc="toolboxv2.mods.", spec='app', save=True):
    """Proxy attr — interface stub; implemented by the concrete App."""

load_all_mods_in_file(working_dir='mods') async

proxi attr

Source code in toolboxv2/utils/system/types.py
1388
1389
async def load_all_mods_in_file(self, working_dir="mods"):
    """Proxy attr — interface stub; implemented by the concrete App."""

load_external_mods() async

proxi attr

Source code in toolboxv2/utils/system/types.py
1385
1386
async def load_external_mods(self):
    """Proxy attr — interface stub; implemented by the concrete App."""

load_mod(mod_name, mlm='I', **kwargs)

proxi attr

Source code in toolboxv2/utils/system/types.py
1379
1380
def load_mod(self, mod_name: str, mlm='I', **kwargs):
    """Proxy attr — interface stub; implemented by the concrete App."""

mod_online(mod_name, installed=False)

proxi attr

Source code in toolboxv2/utils/system/types.py
1366
1367
def mod_online(self, mod_name, installed=False):
    """Proxy attr — interface stub; implemented by the concrete App."""

print(text, *args, **kwargs) staticmethod

proxi attr

Source code in toolboxv2/utils/system/types.py
1523
1524
1525
@staticmethod
def print(text, *args, **kwargs):
    """Proxy attr — interface stub; implemented by the concrete App. Shadows builtin print by design."""

print_ok()

proxi attr

Source code in toolboxv2/utils/system/types.py
1404
1405
1406
def print_ok(self):
    """Log "OK" via self.logger. Proxy attr with a default implementation."""
    self.logger.info("OK")

reload_mod(mod_name, spec='app', is_file=True, loc='toolboxv2.mods.')

proxi attr

Source code in toolboxv2/utils/system/types.py
1408
1409
def reload_mod(self, mod_name, spec='app', is_file=True, loc="toolboxv2.mods."):
    """Proxy attr — interface stub; implemented by the concrete App."""

remove_mod(mod_name, spec='app', delete=True)

proxi attr

Source code in toolboxv2/utils/system/types.py
1414
1415
def remove_mod(self, mod_name, spec='app', delete=True):
    """Proxy attr — interface stub; implemented by the concrete App."""

rrun_flows(name, **kwargs)

proxi attr

Source code in toolboxv2/utils/system/types.py
1317
1318
def rrun_flows(self, name, **kwargs):
    """Proxy attr — interface stub; implemented by the concrete App."""

run_a_from_sync(function, *args)

Run an async function from synchronous code.

Source code in toolboxv2/utils/system/types.py
1442
1443
1444
1445
def run_a_from_sync(self, function, *args):
    """
    Run an async function from synchronous code. Interface stub; implemented by the concrete App.
    """

run_any(mod_function_name, backwords_compability_variabel_string_holder=None, get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs)

proxi attr

Source code in toolboxv2/utils/system/types.py
1507
1508
1509
1510
1511
def run_any(self, mod_function_name: Enum or str or tuple, backwords_compability_variabel_string_holder=None,
            get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
            kwargs_=None,
            *args, **kwargs):
    """Proxy attr — interface stub; implemented by the concrete App."""

run_bg_task(task)

Run an async function as a background task.

Source code in toolboxv2/utils/system/types.py
1457
1458
1459
1460
def run_bg_task(self, task):
    """
    Run an async task in the background. Interface stub; implemented by the concrete App.
    """

run_bg_task_advanced(task, *args, **kwargs)

proxi attr

Source code in toolboxv2/utils/system/types.py
1447
1448
1449
1450
def run_bg_task_advanced(self, task, *args, **kwargs):
    """
    Proxy attr — interface stub; implemented by the concrete App.
    """

run_flows(name, **kwargs)

proxi attr

Source code in toolboxv2/utils/system/types.py
1314
1315
def run_flows(self, name, **kwargs):
    """Proxy attr — interface stub; implemented by the concrete App."""

run_function(mod_function_name, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs)

proxi attr

Source code in toolboxv2/utils/system/types.py
1461
1462
1463
1464
1465
1466
1467
1468
1469
def run_function(self, mod_function_name: Enum or tuple,
                 tb_run_function_with_state=True,
                 tb_run_with_specification='app',
                 args_=None,
                 kwargs_=None,
                 *args,
                 **kwargs) -> Result:

    """Proxy attr — interface stub; implemented by the concrete App."""

run_http(mod_function_name, function_name=None, method='GET', args_=None, kwargs_=None, *args, **kwargs) async

run a function remote via http / https

Source code in toolboxv2/utils/system/types.py
1501
1502
1503
1504
1505
async def run_http(self, mod_function_name: Enum or str or tuple, function_name=None, method="GET",
                   args_=None,
                   kwargs_=None,
                   *args, **kwargs):
    """Run a function remotely via HTTP/HTTPS. Interface stub; implemented by the concrete App."""

save_autocompletion_dict()

proxi attr

Source code in toolboxv2/utils/system/types.py
1695
1696
def save_autocompletion_dict(self):
    """Proxy attr — interface stub; implemented by the concrete App."""

save_exit()

proxi attr

Source code in toolboxv2/utils/system/types.py
1376
1377
def save_exit(self):
    """Proxy attr — interface stub; implemented by the concrete App."""

save_initialized_module(tools_class, spec)

proxi attr

Source code in toolboxv2/utils/system/types.py
1363
1364
def save_initialized_module(self, tools_class, spec):
    """Proxy attr — interface stub; implemented by the concrete App."""

save_instance(instance, modular_id, spec='app', instance_type='file/application', tools_class=None)

proxi attr

Source code in toolboxv2/utils/system/types.py
1360
1361
def save_instance(self, instance, modular_id, spec='app', instance_type="file/application", tools_class=None):
    """Proxy attr — interface stub; implemented by the concrete App."""

save_load(modname, spec='app')

proxi attr

Source code in toolboxv2/utils/system/types.py
1429
1430
def save_load(self, modname, spec='app'):
    """Proxy attr — interface stub; implemented by the concrete App."""

save_registry_as_enums(directory, filename)

proxi attr

Source code in toolboxv2/utils/system/types.py
1704
1705
def save_registry_as_enums(self, directory: str, filename: str):
    """Proxy attr — interface stub; implemented by the concrete App."""

set_flows(r)

proxi attr

Source code in toolboxv2/utils/system/types.py
1311
1312
def set_flows(self, r):
    """Proxy attr — interface stub; implemented by the concrete App."""

set_logger(debug=False)

proxi attr

Source code in toolboxv2/utils/system/types.py
1300
1301
def set_logger(self, debug=False):
    """Proxy attr — interface stub; implemented by the concrete App."""

show_console(*args, **kwargs) async staticmethod

proxi attr

Source code in toolboxv2/utils/system/types.py
1292
1293
1294
@staticmethod
async def show_console(*args, **kwargs):
    """Proxy attr — interface stub; implemented by the concrete App."""

sprint(text, *args, **kwargs) staticmethod

proxi attr

Source code in toolboxv2/utils/system/types.py
1527
1528
1529
@staticmethod
def sprint(text, *args, **kwargs):
    """proxi attr"""

tb(name=None, mod_name='', helper='', version=None, test=True, restrict_in_virtual_mode=False, api=False, initial=False, exit_f=False, test_only=False, memory_cache=False, file_cache=False, row=False, request_as_kwarg=False, state=None, level=0, memory_cache_max_size=100, memory_cache_ttl=300, samples=None, interface=None, pre_compute=None, post_compute=None, api_methods=None, websocket_handler=None)

A decorator for registering and configuring functions within a module.

This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

Parameters:

Name Type Description Default
name str

The name to register the function under. Defaults to the function's own name.

None
mod_name str

The name of the module the function belongs to.

''
helper str

A helper string providing additional information about the function.

''
version str or None

The version of the function or module.

None
test bool

Flag to indicate if the function is for testing purposes.

True
restrict_in_virtual_mode bool

Flag to restrict the function in virtual mode.

False
api bool

Flag to indicate if the function is part of an API.

False
initial bool

Flag to indicate if the function should be executed at initialization.

False
exit_f bool

Flag to indicate if the function should be executed at exit.

False
test_only bool

Flag to indicate if the function should only be used for testing.

False
memory_cache bool

Flag to enable memory caching for the function.

False
request_as_kwarg bool

Flag to pass the request object as a keyword argument when the function is called from the API.

False
file_cache bool

Flag to enable file caching for the function.

False
row bool

rather to auto wrap the result in Result type default False means no row data aka result type

False
state bool or None

Flag to indicate if the function maintains state.

None
level int

The level of the function, used for prioritization or categorization.

0
memory_cache_max_size int

Maximum size of the memory cache.

100
memory_cache_ttl int

Time-to-live for the memory cache entries.

300
samples list or dict or None

Samples or examples of function usage.

None
interface str

The interface type for the function.

None
pre_compute callable

A function to be called before the main function.

None
post_compute callable

A function to be called after the main function.

None
api_methods list[str]

default ["AUTO"] (GET if not params, POST if params) , GET, POST, PUT or DELETE.

None

Returns:

Name Type Description
function

The decorated function with additional processing and registration capabilities.

Source code in toolboxv2/utils/system/types.py
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
def tb(self, name=None,
       mod_name: str = "",
       helper: str = "",
       version: str or None = None,
       test: bool = True,
       restrict_in_virtual_mode: bool = False,
       api: bool = False,
       initial: bool = False,
       exit_f: bool = False,
       test_only: bool = False,
       memory_cache: bool = False,
       file_cache: bool = False,
       row=False,
       request_as_kwarg: bool = False,
       state: bool or None = None,
       level: int = 0,
       memory_cache_max_size: int = 100,
       memory_cache_ttl: int = 300,
       samples: list or dict or None = None,
       interface: ToolBoxInterfaces or None or str = None,
       pre_compute=None,
       post_compute=None,
       api_methods=None,
       websocket_handler: str | None = None,
       ):
    """
    A decorator for registering and configuring functions within a module.

    This decorator wraps functions with additional functionality such as caching,
    API conversion, and lifecycle management (initialization and exit). It also
    registers the function in the module's function registry.

    Args:
        name (str, optional): The name to register the function under. Defaults to the function's own name.
        mod_name (str, optional): The name of the module the function belongs to.
        helper (str, optional): A helper string providing additional information about the function.
        version (str or None, optional): The version of the function or module.
        test (bool, optional): Flag to indicate if the function is for testing purposes.
        restrict_in_virtual_mode (bool, optional): Flag to restrict the function in virtual mode.
        api (bool, optional): Flag to indicate if the function is part of an API.
        initial (bool, optional): Flag to indicate if the function should be executed at initialization.
        exit_f (bool, optional): Flag to indicate if the function should be executed at exit.
        test_only (bool, optional): Flag to indicate if the function should only be used for testing.
        memory_cache (bool, optional): Flag to enable memory caching for the function.
        request_as_kwarg (bool, optional): Flag to pass the request object as a keyword argument when called from the API.
        file_cache (bool, optional): Flag to enable file caching for the function.
        row (bool, optional): Whether to return raw data instead of auto-wrapping the result in a Result type; defaults to False (auto-wrap).
        state (bool or None, optional): Flag to indicate if the function maintains state.
        level (int, optional): The level of the function, used for prioritization or categorization.
        memory_cache_max_size (int, optional): Maximum size of the memory cache.
        memory_cache_ttl (int, optional): Time-to-live for the memory cache entries.
        samples (list or dict or None, optional): Samples or examples of function usage.
        interface (str, optional): The interface type for the function; defaults to "tb".
        pre_compute (callable, optional): A function to be called before the main function.
        post_compute (callable, optional): A function to be called after the main function.
        api_methods (list[str], optional): Default ["AUTO"] (GET if no params, POST if params); GET, POST, PUT or DELETE.

    Returns:
        function: The decorated function with additional processing and registration capabilities.

    Note:
        NOTE(review): api_methods and websocket_handler are accepted here but are
        not forwarded to _create_decorator in this stub — confirm against the
        concrete implementation.
    """
    if interface is None:
        interface = "tb"
    if test_only and 'test' not in self.id:
        return lambda *args, **kwargs: args
    return self._create_decorator(interface,
                                  name,
                                  mod_name,
                                  level=level,
                                  restrict_in_virtual_mode=restrict_in_virtual_mode,
                                  helper=helper,
                                  api=api,
                                  version=version,
                                  initial=initial,
                                  exit_f=exit_f,
                                  test=test,
                                  samples=samples,
                                  state=state,
                                  pre_compute=pre_compute,
                                  post_compute=post_compute,
                                  memory_cache=memory_cache,
                                  file_cache=file_cache,
                                  row=row,
                                  request_as_kwarg=request_as_kwarg,
                                  memory_cache_max_size=memory_cache_max_size,
                                  memory_cache_ttl=memory_cache_ttl)

wait_for_bg_tasks(timeout=None)

proxi attr

Source code in toolboxv2/utils/system/types.py
1452
1453
1454
1455
def wait_for_bg_tasks(self, timeout=None):
    """
    Proxy attr — interface stub; implemented by the concrete App.
    """

watch_mod(mod_name, spec='app', loc='toolboxv2.mods.', use_thread=True, path_name=None)

proxi attr

Source code in toolboxv2/utils/system/types.py
1411
1412
def watch_mod(self, mod_name, spec='app', loc="toolboxv2.mods.", use_thread=True, path_name=None):
    """Proxy attr — interface stub; implemented by the concrete App."""

web_context()

returns the build index ( toolbox web component )

Source code in toolboxv2/utils/system/types.py
1423
1424
def web_context(self) -> str:
    """Return the build index (toolbox web component). Interface stub; implemented by the concrete App."""

toolboxv2.MainTool

Source code in toolboxv2/utils/system/main_tool.py
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
class MainTool:
    """Base class for toolbox modules with a two-phase (sync + async) init protocol.

    Instances are constructed synchronously via ``__init__`` and then awaited
    (``await instance``), which triggers ``__ainit__`` exactly once via
    ``__initobj``.
    """
    toolID: str = ""  # per-tool identifier; class-level default is empty
    # app = None
    interface = None  # set lazily from app.interface_type on first __ainit__
    spec = "app"
    name = ""
    color = "Bold"
    stuf = False  # when True, print() is muted

    def __init__(self, *args, **kwargs):
        """
        Standard constructor used to capture the arguments passed.
        Do not override; use ``__ainit__`` instead — the stored args are
        replayed into ``__ainit__`` when the instance is awaited.
        """
        self.__storedargs = args, kwargs
        self.tools = kwargs.get("tool", {})
        self.logger = kwargs.get("logs", get_logger())
        self.color = kwargs.get("color", "WHITE")
        # 'load'/'on_start' callback; defaults to a no-op lambda (truthy).
        self.todo = kwargs.get("load", kwargs.get("on_start", lambda: None))
        if "on_exit" in kwargs and isinstance(kwargs.get("on_exit"), Callable):
            # Register the on_exit callback through the app's tb decorator.
            # NOTE(review): at this point self.name/self.version may still be
            # class-level defaults; __ainit__ sets them later — confirm ordering.
            self.on_exit =self.app.tb(
                mod_name=self.name,
                name=kwargs.get("on_exit").__name__,
                version=self.version if hasattr(self, 'version') else "0.0.0",
            )(kwargs.get("on_exit"))
        self.async_initialized = False
        if self.todo:
            try:
                # Coroutine callbacks are deferred to __ainit__; only run
                # synchronous callbacks here.
                if inspect.iscoroutinefunction(self.todo):
                    pass
                else:
                    self.todo()
                get_logger().info(f"{self.name} on load suspended")
            except Exception as e:
                get_logger().error(f"Error loading mod {self.name} {e}")
                if self.app.debug:
                    import traceback
                    traceback.print_exc()
        else:
            get_logger().info(f"{self.name} no load require")

    async def __ainit__(self, *args, **kwargs):
        """Async initializer; runs once when the instance is awaited.

        Requires ``v`` (version) and ``name`` in kwargs; raises KeyError otherwise.
        """
        self.version = kwargs["v"]
        self.tools = kwargs.get("tool", {})
        self.name = kwargs["name"]
        self.logger = kwargs.get("logs", get_logger())
        self.color = kwargs.get("color", "WHITE")
        self.todo = kwargs.get("load", kwargs.get("on_start"))
        if not hasattr(self, 'config'):
            self.config = {}
        self.user = None
        self.description = "A toolbox mod" if kwargs.get("description") is None else kwargs.get("description")
        if MainTool.interface is None:
            MainTool.interface = self.app.interface_type
        # Result.default(self.app.interface)

        if self.todo:
            try:
                # Mirror of __init__: only coroutine callbacks run here.
                if inspect.iscoroutinefunction(self.todo):
                    await self.todo()
                else:
                    pass
                await asyncio.sleep(0.1)
                get_logger().info(f"{self.name} on load suspended")
            except Exception as e:
                get_logger().error(f"Error loading mod {self.name} {e}")
                if self.app.debug:
                    import traceback
                    traceback.print_exc()
        else:
            get_logger().info(f"{self.name} no load require")
        self.app.print(f"TOOL : {self.spec}.{self.name} online")



    @property
    def app(self):
        """Resolve the global App singleton via get_app (read-only)."""
        return get_app(
            from_=f"{self.spec}.{self.name}|{self.toolID if self.toolID else '*' + MainTool.toolID} {self.interface if self.interface else MainTool.interface}")

    @app.setter
    def app(self, v):
        raise PermissionError(f"You cannot set the App Instance! {v=}")

    @staticmethod
    def return_result(error: ToolBoxError = ToolBoxError.none,
                      exec_code: int = 0,
                      help_text: str = "",
                      data_info=None,
                      data=None,
                      data_to=None):
        """Build a Result from the given parts, filling sensible defaults."""
        if data_to is None:
            data_to = MainTool.interface if MainTool.interface is not None else ToolBoxInterfaces.cli

        if data is None:
            data = {}

        if data_info is None:
            data_info = {}

        return Result(
            error,
            ToolBoxResult(data_info=data_info, data=data, data_to=data_to),
            ToolBoxInfo(exec_code=exec_code, help_text=help_text)
        )

    def print(self, message, end="\n", **kwargs):
        """Print a message prefixed with the tool's styled name; muted when self.stuf."""
        if self.stuf:
            return

        self.app.print(Style.style_dic[self.color] + self.name + Style.style_dic["END"] + ":", message, end=end,
                       **kwargs)

    def add_str_to_config(self, command):
        """Store a [key, value] pair in self.config; returns False on invalid input.

        NOTE(review): returns None (not True) on success — confirm callers
        only test for the False case.
        """
        if len(command) != 2:
            self.logger.error('Invalid command must be key value')
            return False
        self.config[command[0]] = command[1]

    def webInstall(self, user_instance, construct_render) -> str:
        """Return a web installer for the given user instance and construct-render template (stub; returns None here)."""

    def get_version(self) -> str:
        """Return the version set by __ainit__ (AttributeError if not yet awaited)."""
        return self.version

    async def get_user(self, username: str) -> Result:
        """Fetch a user by name via the CloudM auth manager."""
        return await self.app.a_run_any(CLOUDM_AUTHMANAGER.GET_USER_BY_NAME, username=username, get_results=True)

    async def __initobj(self):
        """Crutch used for __await__ after spawning: replays stored ctor args into __ainit__ once."""
        assert not self.async_initialized
        self.async_initialized = True
        # pass the parameters to __ainit__ that passed to __init__
        await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
        return self

    def __await__(self):
        """Make the instance awaitable; awaiting runs __initobj -> __ainit__."""
        return self.__initobj().__await__()

__init__(*args, **kwargs)

Standard constructor used to capture the passed arguments. Do not override; use __ainit__ instead.

Source code in toolboxv2/utils/system/main_tool.py
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
def __init__(self, *args, **kwargs):
    """
    Standard constructor used to capture the arguments passed.
    Do not override; use ``__ainit__`` instead — the stored args are
    replayed into ``__ainit__`` when the instance is awaited.
    """
    self.__storedargs = args, kwargs
    self.tools = kwargs.get("tool", {})
    self.logger = kwargs.get("logs", get_logger())
    self.color = kwargs.get("color", "WHITE")
    # 'load'/'on_start' callback; defaults to a no-op lambda (truthy).
    self.todo = kwargs.get("load", kwargs.get("on_start", lambda: None))
    if "on_exit" in kwargs and isinstance(kwargs.get("on_exit"), Callable):
        # Register the on_exit callback through the app's tb decorator.
        self.on_exit =self.app.tb(
            mod_name=self.name,
            name=kwargs.get("on_exit").__name__,
            version=self.version if hasattr(self, 'version') else "0.0.0",
        )(kwargs.get("on_exit"))
    self.async_initialized = False
    if self.todo:
        try:
            # Coroutine callbacks are deferred to __ainit__; only run
            # synchronous callbacks here.
            if inspect.iscoroutinefunction(self.todo):
                pass
            else:
                self.todo()
            get_logger().info(f"{self.name} on load suspended")
        except Exception as e:
            get_logger().error(f"Error loading mod {self.name} {e}")
            if self.app.debug:
                import traceback
                traceback.print_exc()
    else:
        get_logger().info(f"{self.name} no load require")

__initobj() async

Crutch used for __await__ after spawning.

Source code in toolboxv2/utils/system/main_tool.py
174
175
176
177
178
179
180
async def __initobj(self):
    """Crutch used for __await__ after spawning: runs __ainit__ exactly once with the stored ctor args."""
    assert not self.async_initialized
    self.async_initialized = True
    # pass the parameters to __ainit__ that passed to __init__
    await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
    return self

get_version()

"Returns the version

Source code in toolboxv2/utils/system/main_tool.py
167
168
169
def get_version(self) -> str:
    """Return the stored version string (set during async init)."""
    return self.version

webInstall(user_instance, construct_render)

"Returns a web installer for the given user instance and construct render template

Source code in toolboxv2/utils/system/main_tool.py
164
165
def webInstall(self, user_instance, construct_render) -> str:
    """Return a web installer for the given user instance and construct render template.

    NOTE(review): this base implementation is a stub — it implicitly
    returns None despite the ``-> str`` annotation; subclasses are
    expected to override it with a real renderer.
    """

toolboxv2.get_app(from_=None, name=None, args=AppArgs().default(), app_con=None, sync=False)

Source code in toolboxv2/utils/system/getting_and_closing_app.py
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
def get_app(from_=None, name=None, args=AppArgs().default(), app_con=None, sync=False) -> AppType:
    """Return the process-wide App singleton, creating it on first use."""
    global registered_apps
    logger = get_logger()
    logger.info(Style.GREYBG(f"get app requested from: {from_}"))

    # Reuse the already-registered instance when one exists.
    existing = registered_apps[0]
    if existing is not None:
        return existing

    # Lazily resolve the App class to avoid a circular import at module load.
    if app_con is None:
        from ... import App
        app_con = App

    instance = app_con(name, args=args) if name else app_con()
    logger.info(Style.Bold(f"App instance, returned ID: {instance.id}"))

    registered_apps[0] = instance
    return instance

System Utilities & Configuration

toolboxv2.FileHandler

Bases: Code

Source code in toolboxv2/utils/system/file_handler.py
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
class FileHandler(Code):
    """Persist small key/value configuration data in an encoded text file.

    Keys are fixed-length (10 character) identifiers, optionally aliased via
    a bidirectional key mapper. Values are stored encoded (via ``Code``) in
    ``file_handler_save`` and kept decoded in ``file_handler_load``. The
    backing file lives under ``.config/<name>/`` or ``.data/<name>/``
    depending on the filename suffix.
    """

    def __init__(self, filename, name='mainTool', keys=None, defaults=None):
        """Set up in-memory state and key aliases; does NOT read the file.

        Args:
            filename: must end with ``.config`` or ``.data``; the suffix
                selects the storage directory prefix.
            name: logical owner name; dots are replaced by dashes in the path.
            keys: mapping of logical key -> 10-char storage key (aliases).
            defaults: mapping of logical key -> default value.
        """
        if defaults is None:
            defaults = {}
        if keys is None:
            keys = {}
        assert filename.endswith(".config") or filename.endswith(".data"), \
            f"filename must end with .config or .data {filename=}"
        self.file_handler_save = {}          # storage key -> encoded value (what hits disk)
        self.file_handler_load = {}          # storage key -> decoded value (in-memory view)
        self.file_handler_key_mapper = {}    # bidirectional alias map between key names
        self.file_handler_filename = filename
        self.file_handler_storage = None     # open file object while reading/writing
        self.file_handler_max_loaded_index_ = 0  # retry/attempt counter for _open_file_handler
        self.file_handler_file_prefix = (f".{filename.split('.')[1]}/"
                                         f"{name.replace('.', '-')}/")
        # self.load_file_handler()
        self.set_defaults_keys_file_handler(keys, defaults)

    def _open_file_handler(self, mode: str, rdu):
        """Open the backing file in ``mode``, creating path/file on demand.

        On FileNotFoundError the directories and an empty file are created
        and ``rdu`` (the calling opener) is retried. After repeated failures
        the process exits.
        """
        logger = get_logger()
        logger.info(Style.Bold(Style.YELLOW(f"Opening file in mode : {mode}")))
        if self.file_handler_storage:
            self.file_handler_storage.close()
            self.file_handler_storage = None
        try:
            self.file_handler_storage = open(self.file_handler_file_prefix + self.file_handler_filename, mode)
            self.file_handler_max_loaded_index_ += 1
        except FileNotFoundError:
            if self.file_handler_max_loaded_index_ == 2:
                os.makedirs(self.file_handler_file_prefix, exist_ok=True)
            if self.file_handler_max_loaded_index_ == 3:
                os.makedirs(".config/mainTool", exist_ok=True)
            if self.file_handler_max_loaded_index_ >= 5:
                print(Style.RED(f"please create this file to proceed : {self.file_handler_file_prefix}"
                                f"{self.file_handler_filename}"))
                logger.critical(f"{self.file_handler_file_prefix} {self.file_handler_filename} FileNotFoundError cannot"
                                f" be Created")
                # Unrecoverable failure: exit with a non-zero status so
                # callers/scripts can detect it (was exit(0)).
                exit(1)
            self.file_handler_max_loaded_index_ += 1
            logger.info(Style.YELLOW(f"Try Creating File: {self.file_handler_file_prefix}{self.file_handler_filename}"))

            if not os.path.exists(f"{self.file_handler_file_prefix}"):
                os.makedirs(f"{self.file_handler_file_prefix}")

            with open(self.file_handler_file_prefix + self.file_handler_filename, 'a'):
                logger.info(Style.GREEN("File created successfully"))
                self.file_handler_max_loaded_index_ = -1
            rdu()
        except (OSError, PermissionError) as e:
            # BUGFIX: the original `except OSError and PermissionError`
            # evaluated to PermissionError only; a tuple catches both.
            raise e

    def open_s_file_handler(self):
        """Open the file for saving (truncating write)."""
        self._open_file_handler('w+', self.open_s_file_handler)
        return self

    def open_l_file_handler(self):
        """Open the file for loading (read/update)."""
        self._open_file_handler('r+', self.open_l_file_handler)
        return self

    def save_file_handler(self):
        """Serialize ``file_handler_save`` to the backing file as JSON."""
        get_logger().info(
            Style.BLUE(
                f"init Saving (S) {self.file_handler_filename} "
            )
        )
        if self.file_handler_storage:
            get_logger().warning(
                f"WARNING file is already open (S): {self.file_handler_filename} {self.file_handler_storage}")

        self.open_s_file_handler()

        get_logger().info(
            Style.BLUE(
                f"Elements to save : ({len(self.file_handler_save.keys())})"
            )
        )

        self.file_handler_storage.write(json.dumps(self.file_handler_save))

        self.file_handler_storage.close()
        self.file_handler_storage = None

        get_logger().info(
            Style.BLUE(
                f"closing file : {self.file_handler_filename} "
            )
        )

        return self

    def add_to_save_file_handler(self, key: str, value: str):
        """Stage ``value`` under 10-char ``key`` (alias-resolved).

        Returns True on success, False when the key length is not 10.
        """
        if len(key) != 10:
            get_logger(). \
                warning(
                Style.YELLOW(
                    'WARNING: key length is not 10 characters'
                )
            )
            return False
        if key not in self.file_handler_load:
            # Unknown key: fall back to its alias if one is registered.
            if key in self.file_handler_key_mapper:
                key = self.file_handler_key_mapper[key]

        self.file_handler_load[key] = value
        self.file_handler_save[key] = self.encode_code(value)
        return True

    def remove_key_file_handler(self, key: str):
        """Remove ``key`` from both stores; the root key is protected."""
        if key == 'Pka7237327':
            print("Cant remove Root Key")
            return
        if key in self.file_handler_load:
            del self.file_handler_load[key]
        if key in self.file_handler_save:
            del self.file_handler_save[key]

    def load_file_handler(self):
        """Read the backing file, decoding values into ``file_handler_load``.

        Primary format is a single JSON object; on any parse failure a
        legacy line-based format (10-char key + encoded payload per line)
        is attempted instead.
        """
        get_logger().info(
            Style.BLUE(
                f"loading {self.file_handler_filename} "
            )
        )
        if self.file_handler_storage:
            get_logger().warning(
                Style.YELLOW(
                    f"WARNING file is already open (L) {self.file_handler_filename}"
                )
            )
        self.open_l_file_handler()

        try:

            self.file_handler_save = json.load(self.file_handler_storage)
            for key, line in self.file_handler_save.items():
                self.file_handler_load[key] = self.decode_code(line)

        except Exception:
            # NOTE: the original `except json.decoder.JSONDecodeError and
            # Exception` also caught every Exception; written plainly here.
            # Fallback: legacy line-based format.
            for line in self.file_handler_storage:
                line = line[:-1]           # strip trailing newline
                heda = line[:10]           # first 10 chars are the key
                self.file_handler_save[heda] = line[10:]
                enc = self.decode_code(line[10:])
                self.file_handler_load[heda] = enc

            self.file_handler_save = {}

        self.file_handler_storage.close()
        self.file_handler_storage = None

        return self

    def get_file_handler(self, obj: str, default=None) -> "str | None":
        """Look up ``obj`` (alias-resolved) in the loaded data.

        String values are run through ``ast.literal_eval`` where possible;
        raw strings are returned on SyntaxError/NameError. Falls back to the
        encoded store, then to ``default``.
        """
        logger = get_logger()
        if obj not in self.file_handler_load:
            if obj in self.file_handler_key_mapper:
                obj = self.file_handler_key_mapper[obj]
        logger.info(Style.ITALIC(Style.GREY(f"Collecting data from storage key : {obj}")))
        self.file_handler_max_loaded_index_ = -1
        for objects in self.file_handler_load.items():
            self.file_handler_max_loaded_index_ += 1
            if obj == objects[0]:

                try:
                    if len(objects[1]) > 0:
                        return ast.literal_eval(objects[1]) if isinstance(objects[1], str) else objects[1]
                    logger.warning(
                        Style.YELLOW(
                            f"No data  {obj}  ; {self.file_handler_filename}"
                        )
                    )
                except ValueError:
                    logger.error(f"ValueError Loading {obj} ; {self.file_handler_filename}")
                except SyntaxError:
                    if isinstance(objects[1], str):
                        return objects[1]
                    logger.warning(
                        Style.YELLOW(
                            f"Possible SyntaxError Loading {obj} ; {self.file_handler_filename}"
                            f" {len(objects[1])} {type(objects[1])}"
                        )
                    )
                    return objects[1]
                except NameError:
                    return str(objects[1])

        if obj in list(self.file_handler_save.keys()):
            r = self.decode_code(self.file_handler_save[obj])
            logger.info(f"returning Default for {obj}")
            return r

        if default is None:
            default = self.file_handler_load.get(obj)

        logger.info("no data found")
        return default

    def set_defaults_keys_file_handler(self, keys: dict, defaults: dict):
        """Register key aliases and seed default values for each mapped key."""
        df_keys = defaults.keys()
        for key in keys:
            # Aliases work in both directions.
            self.file_handler_key_mapper[key] = keys[key]
            self.file_handler_key_mapper[keys[key]] = key
            if key in df_keys:
                self.file_handler_load[keys[key]] = str(defaults[key])
                self.file_handler_save[keys[key]] = self.encode_code(defaults[key])
            else:
                self.file_handler_load[keys[key]] = "None"

    def delete_file(self):
        """Delete the backing file from disk."""
        os.remove(self.file_handler_file_prefix + self.file_handler_filename)
        get_logger().warning(Style.GREEN(f"File deleted {self.file_handler_file_prefix + self.file_handler_filename}"))

toolboxv2.utils

App

Source code in toolboxv2/utils/toolbox.py
  44
  45
  46
  47
  48
  49
  50
  51
  52
  53
  54
  55
  56
  57
  58
  59
  60
  61
  62
  63
  64
  65
  66
  67
  68
  69
  70
  71
  72
  73
  74
  75
  76
  77
  78
  79
  80
  81
  82
  83
  84
  85
  86
  87
  88
  89
  90
  91
  92
  93
  94
  95
  96
  97
  98
  99
 100
 101
 102
 103
 104
 105
 106
 107
 108
 109
 110
 111
 112
 113
 114
 115
 116
 117
 118
 119
 120
 121
 122
 123
 124
 125
 126
 127
 128
 129
 130
 131
 132
 133
 134
 135
 136
 137
 138
 139
 140
 141
 142
 143
 144
 145
 146
 147
 148
 149
 150
 151
 152
 153
 154
 155
 156
 157
 158
 159
 160
 161
 162
 163
 164
 165
 166
 167
 168
 169
 170
 171
 172
 173
 174
 175
 176
 177
 178
 179
 180
 181
 182
 183
 184
 185
 186
 187
 188
 189
 190
 191
 192
 193
 194
 195
 196
 197
 198
 199
 200
 201
 202
 203
 204
 205
 206
 207
 208
 209
 210
 211
 212
 213
 214
 215
 216
 217
 218
 219
 220
 221
 222
 223
 224
 225
 226
 227
 228
 229
 230
 231
 232
 233
 234
 235
 236
 237
 238
 239
 240
 241
 242
 243
 244
 245
 246
 247
 248
 249
 250
 251
 252
 253
 254
 255
 256
 257
 258
 259
 260
 261
 262
 263
 264
 265
 266
 267
 268
 269
 270
 271
 272
 273
 274
 275
 276
 277
 278
 279
 280
 281
 282
 283
 284
 285
 286
 287
 288
 289
 290
 291
 292
 293
 294
 295
 296
 297
 298
 299
 300
 301
 302
 303
 304
 305
 306
 307
 308
 309
 310
 311
 312
 313
 314
 315
 316
 317
 318
 319
 320
 321
 322
 323
 324
 325
 326
 327
 328
 329
 330
 331
 332
 333
 334
 335
 336
 337
 338
 339
 340
 341
 342
 343
 344
 345
 346
 347
 348
 349
 350
 351
 352
 353
 354
 355
 356
 357
 358
 359
 360
 361
 362
 363
 364
 365
 366
 367
 368
 369
 370
 371
 372
 373
 374
 375
 376
 377
 378
 379
 380
 381
 382
 383
 384
 385
 386
 387
 388
 389
 390
 391
 392
 393
 394
 395
 396
 397
 398
 399
 400
 401
 402
 403
 404
 405
 406
 407
 408
 409
 410
 411
 412
 413
 414
 415
 416
 417
 418
 419
 420
 421
 422
 423
 424
 425
 426
 427
 428
 429
 430
 431
 432
 433
 434
 435
 436
 437
 438
 439
 440
 441
 442
 443
 444
 445
 446
 447
 448
 449
 450
 451
 452
 453
 454
 455
 456
 457
 458
 459
 460
 461
 462
 463
 464
 465
 466
 467
 468
 469
 470
 471
 472
 473
 474
 475
 476
 477
 478
 479
 480
 481
 482
 483
 484
 485
 486
 487
 488
 489
 490
 491
 492
 493
 494
 495
 496
 497
 498
 499
 500
 501
 502
 503
 504
 505
 506
 507
 508
 509
 510
 511
 512
 513
 514
 515
 516
 517
 518
 519
 520
 521
 522
 523
 524
 525
 526
 527
 528
 529
 530
 531
 532
 533
 534
 535
 536
 537
 538
 539
 540
 541
 542
 543
 544
 545
 546
 547
 548
 549
 550
 551
 552
 553
 554
 555
 556
 557
 558
 559
 560
 561
 562
 563
 564
 565
 566
 567
 568
 569
 570
 571
 572
 573
 574
 575
 576
 577
 578
 579
 580
 581
 582
 583
 584
 585
 586
 587
 588
 589
 590
 591
 592
 593
 594
 595
 596
 597
 598
 599
 600
 601
 602
 603
 604
 605
 606
 607
 608
 609
 610
 611
 612
 613
 614
 615
 616
 617
 618
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
class App(AppType, metaclass=Singleton):

    def __init__(self, prefix: str = "", args=AppArgs().default()):
        """Construct the application singleton.

        Resolves platform-specific paths, restores/persists the instance
        prefix, configures logging, loads the config file handler, and
        (unless running in ``db`` CLI mode) connects to the DB cluster and
        blob storage.

        Args:
            prefix: Instance prefix. When empty, the last used prefix is
                restored from ``.data/last-app-prefix.txt``; otherwise it
                is persisted there for the next start.
            args: Parsed application arguments. NOTE: the default is a
                single ``AppArgs`` instance evaluated once at import time.
        """
        super().__init__(prefix, args)
        self._web_context = None
        t0 = time.perf_counter()
        # Bugfix: bind the CLI args immediately. The "provider::" fallback
        # below reads self.args_sto.port, which previously was not assigned
        # until the end of __init__ and raised AttributeError on first run.
        self.args_sto = args
        abspath = os.path.abspath(__file__)
        self.system_flag = system()  # Linux: Linux Mac: Darwin Windows: Windows

        self.appdata = os.getenv('APPDATA') if os.name == 'nt' else os.getenv('XDG_CONFIG_HOME') or os.path.expanduser(
                '~/.config') if os.name == 'posix' else None

        if self.system_flag == "Darwin" or self.system_flag == "Linux":
            dir_name = os.path.dirname(abspath).replace("/utils", "")
        else:
            dir_name = os.path.dirname(abspath).replace("\\utils", "")

        self.start_dir = str(dir_name)

        self.bg_tasks = []

        # NOTE(review): backslash separators are used on every platform
        # here; os.makedirs/open tolerate them on POSIX, but the resulting
        # paths look odd -- confirm before normalizing.
        lapp = dir_name + '\\.data\\'

        prefix_file_path = f"{lapp}last-app-prefix.txt"
        # Bootstrap the prefix file once instead of duplicating the check
        # in both branches below.
        if not os.path.exists(prefix_file_path):
            os.makedirs(lapp, exist_ok=True)
            open(prefix_file_path, "a").close()
        if not prefix:
            # No explicit prefix given: restore the last one used.
            with open(prefix_file_path) as prefix_file:
                cont = prefix_file.read()
                if cont:
                    prefix = cont.rstrip()
        else:
            # Explicit prefix: persist it for the next start.
            with open(prefix_file_path, "w") as prefix_file:
                prefix_file.write(prefix)

        self.prefix = prefix

        node_ = node()

        if 'localhost' in node_ and (host := os.getenv('HOSTNAME', 'localhost')) != 'localhost':
            node_ = node_.replace('localhost', host)
        self.id = prefix + '-' + node_
        self.globals = {
            "root": {**globals()},
        }
        self.locals = {
            "user": {'app': self, **locals()},
        }

        identification = self.id
        collective_identification = self.id
        if "test" in prefix:
            # Test instances get their own .data/.config/.info tree.
            if self.system_flag == "Darwin" or self.system_flag == "Linux":
                start_dir = self.start_dir.replace("ToolBoxV2/toolboxv2", "toolboxv2")
            else:
                start_dir = self.start_dir.replace("ToolBoxV2\\toolboxv2", "toolboxv2")
            self.data_dir = start_dir + '\\.data\\' + "test"
            self.config_dir = start_dir + '\\.config\\' + "test"
            self.info_dir = start_dir + '\\.info\\' + "test"
        elif identification.startswith('collective-'):
            # Collective instances share state keyed by the collective name.
            collective_identification = identification.split('-')[1]
            self.data_dir = self.start_dir + '\\.data\\' + collective_identification
            self.config_dir = self.start_dir + '\\.config\\' + collective_identification
            self.info_dir = self.start_dir + '\\.info\\' + collective_identification
            self.id = collective_identification
        else:
            self.data_dir = self.start_dir + '\\.data\\' + identification
            self.config_dir = self.start_dir + '\\.config\\' + identification
            self.info_dir = self.start_dir + '\\.info\\' + identification

        if self.appdata is None:
            self.appdata = self.data_dir
        else:
            self.appdata += "/ToolBoxV2"

        # exist_ok=True makes the previous os.path.exists guards redundant.
        os.makedirs(self.appdata, exist_ok=True)
        os.makedirs(self.data_dir, exist_ok=True)
        os.makedirs(self.config_dir, exist_ok=True)
        os.makedirs(self.info_dir, exist_ok=True)

        print(f"Starting ToolBox as {prefix} from :", Style.Bold(Style.CYAN(f"{os.getcwd()}")))

        logger_info_str, self.logger, self.logging_filename = self.set_logger(args.debug)

        print("Logger " + logger_info_str)
        print("================================")
        self.logger.info("Logger initialized")
        get_logger().info(Style.GREEN("Starting Application instance"))
        if args.init and self.start_dir not in sys.path:
            sys.path.append(self.start_dir)

        __version__ = get_version_from_pyproject()
        self.version = __version__

        # Fixed-width keys used by the FileHandler key/value store.
        self.keys = {
            "MACRO": "macro~~~~:",
            "MACRO_C": "m_color~~:",
            "HELPER": "helper~~~:",
            "debug": "debug~~~~:",
            "id": "name-spa~:",
            "st-load": "mute~load:",
            "comm-his": "comm-his~:",
            "develop-mode": "dev~mode~:",
            "provider::": "provider::",
        }

        defaults = {
            "MACRO": ['Exit'],
            "MACRO_C": {},
            "HELPER": {},
            "debug": args.debug,
            "id": self.id,
            "st-load": False,
            "comm-his": [[]],
            "develop-mode": False,
        }
        self.config_fh = FileHandler(collective_identification + ".config", keys=self.keys, defaults=defaults)
        self.config_fh.load_file_handler()
        self._debug = args.debug
        self.flows = {}
        self.dev_modi = self.config_fh.get_file_handler(self.keys["develop-mode"])
        if self.config_fh.get_file_handler("provider::") is None:
            self.config_fh.add_to_save_file_handler("provider::", "http://localhost:" + str(
                self.args_sto.port) if os.environ.get("HOSTNAME","localhost") == "localhost" else "https://simplecore.app")
        self.functions = {}
        self.modules = {}

        self.interface_type = ToolBoxInterfaces.native
        self.PREFIX = Style.CYAN(f"~{node()}@>")
        self.alive = True
        self.called_exit = False, time.time()

        self.print(f"Infos:\n  {'Name':<8} -> {node()}\n  {'ID':<8} -> {self.id}\n  {'Version':<8} -> {self.version}\n")

        self.logger.info(
            Style.GREEN(
                f"Finish init up in {time.perf_counter() - t0:.2f}s"
            )
        )

        self.loop = None

        from .system.session import Session
        self.session: Session = Session(self.get_username())
        # 'db' CLI mode skips cluster and blob-storage setup entirely.
        if len(sys.argv) > 2 and sys.argv[1] == "db":
            return
        from .system.db_cli_manager import ClusterManager, get_executable_path
        self.cluster_manager = ClusterManager()
        online_list, server_list = self.cluster_manager.status_all(silent=True)
        if not server_list:
            self.cluster_manager.start_all(get_executable_path(), self.version)
            _, server_list = self.cluster_manager.status_all()
        from .extras.blobs import BlobStorage
        self.root_blob_storage = BlobStorage(servers=server_list, storage_directory=self.data_dir + '\\blob_cache\\')

    def _start_event_loop(self):
        """Spin up the application's asyncio loop on a daemon thread (idempotent)."""
        if self.loop is not None:
            return
        fresh_loop = asyncio.new_event_loop()
        worker = threading.Thread(target=fresh_loop.run_forever, daemon=True)
        self.loop = fresh_loop
        self.loop_thread = worker
        worker.start()

    def get_username(self, get_username_key="ac_user:::", get_input=False, default="loot") -> str:
        """Return the stored username, optionally prompting for it.

        Falls back to *default* (and persists it) when nothing is stored
        and no input was requested or given.
        """
        key = "ac_user:::"
        user_name = self.config_fh.get_file_handler(key)
        if user_name is None and get_input:
            user_name = input("Input your username: ")
            self.config_fh.add_to_save_file_handler(key, user_name)
        if user_name is None:
            user_name = default
            self.config_fh.add_to_save_file_handler(key, user_name)
        return user_name

    def set_username(self, username):
        """Persist *username* under the 'ac_user:::' config key."""
        key = "ac_user:::"
        return self.config_fh.add_to_save_file_handler(key, username)

    @staticmethod
    def exit_main(*args, **kwargs):
        """Proxy attribute; replaced at runtime with the real exit handler."""

    @staticmethod
    def hide_console(*args, **kwargs):
        """Proxy attribute; replaced at runtime with the real implementation."""

    @staticmethod
    def show_console(*args, **kwargs):
        """Proxy attribute; replaced at runtime with the real implementation."""

    @staticmethod
    def disconnect(*args, **kwargs):
        """Proxy attribute; replaced at runtime with the real implementation."""

    def set_logger(self, debug=False):
        """Configure logging based on the instance prefix and debug flag.

        Returns:
            tuple: (human-readable mode string, logger, log file name).
        """
        prefix = self.prefix
        if "test" in prefix and not debug:
            mode = "in Test Mode"
            log, log_file = setup_logging(logging.NOTSET, name="toolbox-test", interminal=True,
                                          file_level=logging.NOTSET, app_name=self.id)
        elif "live" in prefix and not debug:
            mode = "in Live Mode"
            log, log_file = setup_logging(logging.DEBUG, name="toolbox-live", interminal=False,
                                          file_level=logging.WARNING, app_name=self.id)
            # setup_logging(logging.WARNING, name="toolbox-live", is_online=True
            #              , online_level=logging.WARNING).info("Logger initialized")
        elif "debug" in prefix or prefix.endswith("D"):
            # Strip the debug marker from the prefix and force debug mode on.
            self.prefix = prefix.replace("-debug", '').replace("debug", '')
            mode = "in debug Mode"
            log, log_file = setup_logging(logging.DEBUG, name="toolbox-debug", interminal=True,
                                          file_level=logging.WARNING, app_name=self.id)
            self.debug = True
        elif debug:
            mode = "in args debug Mode"
            log, log_file = setup_logging(logging.DEBUG, name=f"toolbox-{self.prefix}-debug",
                                          interminal=True,
                                          file_level=logging.DEBUG, app_name=self.id)
        else:
            mode = "in Default"
            log, log_file = setup_logging(logging.ERROR, name=f"toolbox-{self.prefix}", app_name=self.id)

        return mode, log, log_file

    @property
    def debug(self):
        """bool: Current debug flag (mirrors the private ``_debug`` attribute)."""
        return self._debug

    @debug.setter
    def debug(self, value):
        """Set the debug flag; rejects anything that is not a bool."""
        if isinstance(value, bool):
            self._debug = value
            return
        self.logger.debug(f"Value must be an boolean. is : {value} type of {type(value)}")
        raise ValueError("Value must be an boolean.")

    def debug_rains(self, e):
        """Print the current traceback framed by a DEBUG banner.

        In debug mode the exception is re-raised after printing; otherwise
        it is logged as an error and execution continues.

        Args:
            e: The exception being reported.
        """
        # Deduplicated: both branches previously built the banner and
        # printed the traceback with identical code.
        import traceback
        banner = "=" * 5 + " DEBUG " + "=" * 5
        if not self.debug:
            self.logger.error(f"Error: {e}")
        self.print(banner)
        self.print(traceback.format_exc())
        self.print(banner)
        if self.debug:
            raise e

    def set_flows(self, r):
        # Replace the flow registry wholesale with the mapping *r*.
        self.flows = r

    async def run_flows(self, name, **kwargs):
        """Execute the flow registered under *name*, awaiting it if async.

        Unknown names trigger a lazy remote lookup before giving up.
        """
        from ..flows import flows_dict as flows_dict_func
        if name not in self.flows:
            # Merge in remotely discovered flows matching this name.
            self.flows = {**self.flows, **flows_dict_func(s=name, remote=True)}
        flow = self.flows.get(name)
        if flow is None:
            print("Flow not found, active flows:", len(self.flows.keys()))
            return
        runner_app = get_app(from_="runner")
        if asyncio.iscoroutinefunction(flow):
            return await flow(runner_app, self.args_sto, **kwargs)
        return flow(runner_app, self.args_sto, **kwargs)

    def _coppy_mod(self, content, new_mod_dir, mod_name, file_type='py'):
        """Write *content* into the runtime copy of a mod.

        Creates the target package directory (with an ``__init__.py``
        carrying the app version) on first use. The mod file is created
        with 'xb' when absent and rewritten with 'wb' only when its bytes
        differ from *content*.

        Args:
            content: Raw bytes of the mod source.
            new_mod_dir: Target directory for the runtime copy.
            mod_name: Module name (file stem).
            file_type: File extension, defaults to 'py'.
        """
        mode = 'xb'
        self.logger.info(f" coppy mod {mod_name} to {new_mod_dir} size : {sys.getsizeof(content) / 8388608:.3f} mb")

        if not os.path.exists(new_mod_dir):
            os.makedirs(new_mod_dir)
            with open(f"{new_mod_dir}/__init__.py", "w") as nmd:
                nmd.write(f"__version__ = '{self.version}'")

        target = f"{new_mod_dir}/{mod_name}.{file_type}"
        if os.path.exists(target):
            mode = False

            with open(target, 'rb') as d:
                runtime_mod = d.read()

            # Bugfix: compare the actual bytes, not just lengths -- a
            # same-size edit was previously never copied over.
            if content != runtime_mod:
                mode = 'wb'

        if mode:
            with open(target, mode) as f:
                f.write(content)

    def _pre_lib_mod(self, mod_name, path_to="./runtime", file_type='py'):
        """Copy a mod's source into the runtime mod_lib and return the
        dotted import prefix it will live under."""
        working_dir = self.id.replace(".", "_")
        lib_mod_dir = f"toolboxv2.runtime.{working_dir}.mod_lib."

        self.logger.info(f"pre_lib_mod {mod_name} from {lib_mod_dir}")

        # Dev-mode instances read from the mods_dev folder instead.
        suffix = "_dev" if self.dev_modi else ""
        source_path = f"./mods{suffix}/{mod_name}.{file_type}"
        target_dir = f"{path_to}/{working_dir}/mod_lib"
        with open(source_path, "rb") as handle:
            payload = handle.read()
        self._coppy_mod(payload, target_dir, mod_name, file_type=file_type)
        return lib_mod_dir

    def _copy_load(self, mod_name, file_type='py', **kwargs):
        """Copy a mod into the runtime lib dir, then load an instance of it.

        Bugfix: *file_type* was previously passed positionally and landed
        in ``_pre_lib_mod``'s ``path_to`` parameter, breaking the copy
        destination whenever a non-default file type was used.
        """
        loc = self._pre_lib_mod(mod_name, file_type=file_type)
        return self.inplace_load_instance(mod_name, loc=loc, **kwargs)

    def helper_install_pip_module(self, module_name):
        """Install *module_name* with pip using the current interpreter.

        Skipped for 'main' instances. Security fix: the previous
        ``os.system`` call interpolated *module_name* into a shell string,
        allowing shell injection; an argument list with no shell is used
        instead.
        """
        if 'main' in self.id:
            return
        import subprocess
        self.print(f"Installing {module_name} GREEDY")
        subprocess.run([sys.executable, "-m", "pip", "install", module_name], check=False)

    def python_module_import_classifier(self, mod_name, error_message):
        """Triage an import-error message and attempt an automatic remedy.

        toolboxv2.utils errors are reported as internal 404s; missing
        toolboxv2 mods are installed via CloudM; any other missing module
        is installed through pip.
        """
        utils_marker = "No module named 'toolboxv2.utils"
        mods_marker = "No module named 'toolboxv2.mods"
        generic_marker = "No module named '"
        if error_message.startswith(utils_marker):
            return Result.default_internal_error(f"404 {error_message.split('utils')[1]} not found")
        if error_message.startswith(mods_marker):
            if mod_name.startswith('.'):
                return None
            # Missing toolbox mod: install it through CloudM.
            return self.run_a_from_sync(self.a_run_any, ("CloudM", "install"), module_name=mod_name)
        if error_message.startswith(generic_marker):
            pip_requ = error_message.split("'")[1].replace("'", "").strip()
            # if 'y' in input(f"\t\t\tAuto install {pip_requ} Y/n").lower:
            return self.helper_install_pip_module(pip_requ)

    def inplace_load_instance(self, mod_name, loc="toolboxv2.mods.", spec='app', save=True, mfo=None):
        """Import a mod by name and (optionally) register a live instance.

        Args:
            mod_name: Mod name; must exist under ``mods/`` as a package
                directory or a ``.py`` file.
            loc: Dotted import prefix; switched to ``toolboxv2.mods_dev.``
                in dev mode.
            spec: Specification key the instance is registered under.
            save: When False, the instance is returned but not registered.
            mfo: Optional already-imported module object to reuse.

        Returns:
            The initialized Tools instance, the bare module object, or
            None when the mod cannot be found or imported.
        """
        if self.dev_modi and loc == "toolboxv2.mods.":
            loc = "toolboxv2.mods_dev."
        # An 'app'-spec mod that is already online is unloaded first so
        # the import below acts as a reload.
        if spec=='app' and self.mod_online(mod_name):
            self.logger.info(f"Reloading mod from : {loc + mod_name}")
            self.remove_mod(mod_name, spec=spec, delete=False)

        # Import only when the mod exists on disk (dir or .py file).
        if (os.path.exists(self.start_dir + '/mods/' + mod_name) or os.path.exists(
            self.start_dir + '/mods/' + mod_name + '.py')) and (
            os.path.isdir(self.start_dir + '/mods/' + mod_name) or os.path.isfile(
            self.start_dir + '/mods/' + mod_name + '.py')):
            try:
                if mfo is None:
                    modular_file_object = import_module(loc + mod_name)
                else:
                    modular_file_object = mfo
                self.modules[mod_name] = modular_file_object
            except ModuleNotFoundError as e:
                self.logger.error(Style.RED(f"module {loc + mod_name} not found is type sensitive {e}"))
                self.print(Style.RED(f"module {loc + mod_name} not found is type sensitive {e}"))
                if self.debug or self.args_sto.sysPrint:
                    # Try to classify the import error and auto-remediate.
                    self.python_module_import_classifier(mod_name, str(e))
                self.debug_rains(e)
                return None
        else:
            self.print(f"module {loc + mod_name} is not valid")
            return None
        # A module exposing a Tools class gets class-based handling;
        # a module with a 'name' attribute is treated as the mod itself.
        if hasattr(modular_file_object, "Tools"):
            tools_class = modular_file_object.Tools
        else:
            if hasattr(modular_file_object, "name"):
                tools_class = modular_file_object
                modular_file_object = import_module(loc + mod_name)
            else:
                tools_class = None

        modular_id = None
        instance = modular_file_object
        app_instance_type = "file/application"

        if tools_class is None:
            modular_id = modular_file_object.Name if hasattr(modular_file_object, "Name") else mod_name

        if tools_class is None and modular_id is None:
            # Neither a Tools class nor a Name: hand back the raw module.
            modular_id = str(modular_file_object.__name__)
            self.logger.warning(f"Unknown instance loaded {mod_name}")
            return modular_file_object

        if tools_class is not None:
            tools_class = self.save_initialized_module(tools_class, spec)
            modular_id = tools_class.name
            app_instance_type = "functions/class"
        else:
            instance.spec = spec
        # if private:
        #     self.functions[modular_id][f"{spec}_private"] = private

        if not save:
            return instance if tools_class is None else tools_class

        return self.save_instance(instance, modular_id, spec, app_instance_type, tools_class=tools_class)

    def save_instance(self, instance, modular_id, spec='app', instance_type="file/application", tools_class=None):
        """Register a loaded mod (module object or Tools instance) in the
        functions registry and run its ``on_start`` hooks.

        Args:
            instance: The module object (used when *tools_class* is None).
            modular_id: Registry key for the mod.
            spec: Specification key; the instance is stored under
                ``{spec}_instance``.
            instance_type: Descriptive type tag stored alongside.
            tools_class: Initialized Tools instance, when class-based.

        Returns:
            The registered instance (module object or Tools instance).

        Raises:
            ImportError: When the mod is already registered under this
                spec and cannot be replaced, or the arguments are invalid.
        """

        if modular_id in self.functions and tools_class is None:
            # Module-object mod: only fill an empty slot for this spec.
            if self.functions[modular_id].get(f"{spec}_instance", None) is None:
                self.functions[modular_id][f"{spec}_instance"] = instance
                self.functions[modular_id][f"{spec}_instance_type"] = instance_type
            else:
                self.print("Firest instance stays use new spec to get new instance")
                if modular_id in self.functions and self.functions[modular_id].get(f"{spec}_instance", None) is not None:
                    return self.functions[modular_id][f"{spec}_instance"]
                else:
                    raise ImportError(f"Module already known {modular_id} and not avalabel reload using other spec then {spec}")

        elif tools_class is not None:
            # Class-based mod: register and bridge its legacy tools dict.
            if modular_id not in self.functions:
                self.functions[modular_id] = {}
            self.functions[modular_id][f"{spec}_instance"] = tools_class
            self.functions[modular_id][f"{spec}_instance_type"] = instance_type

            try:
                # Backwards compatibility: expose each entry of the legacy
                # 'tools' dict as a registered function ("/BC" marker).
                if not hasattr(tools_class, 'tools'):
                    tools_class.tools = {"Version": tools_class.get_version, 'name': tools_class.name}
                for function_name in list(tools_class.tools.keys()):
                    t_function_name = function_name.lower()
                    if t_function_name != "all" and t_function_name != "name":
                        self.tb(function_name, mod_name=modular_id)(tools_class.tools.get(function_name))
                self.functions[modular_id][f"{spec}_instance_type"] += "/BC"
                if hasattr(tools_class, 'on_exit'):
                    if "on_exit" in self.functions[modular_id]:
                        self.functions[modular_id]["on_exit"].append(tools_class.on_exit)
                    else:
                        self.functions[modular_id]["on_exit"] = [tools_class.on_exit]
            except Exception as e:
                self.logger.error(f"Starting Module {modular_id} compatibility failed with : {e}")
                pass
        elif modular_id not in self.functions and tools_class is None:
            # First registration of a module-object mod.
            self.functions[modular_id] = {}
            self.functions[modular_id][f"{spec}_instance"] = instance
            self.functions[modular_id][f"{spec}_instance_type"] = instance_type

        else:
            raise ImportError(f"Modular {modular_id} is not a valid mod")
        # Run any registered on_start hooks; async hooks are only supported
        # for Tools-class mods and are dispatched to a background thread.
        on_start = self.functions[modular_id].get("on_start")
        if on_start is not None:
            i = 1
            for f in on_start:
                try:
                    f_, e = self.get_function((modular_id, f), state=True, specification=spec)
                    if e == 0:
                        self.logger.info(Style.GREY(f"Running On start {f} {i}/{len(on_start)}"))
                        if asyncio.iscoroutinefunction(f_):
                            self.print(f"Async on start is only in Tool claas supported for {modular_id}.{f}" if tools_class is None else f"initialization starting soon for {modular_id}.{f}")
                            self.run_bg_task_advanced(f_)
                        else:
                            o = f_()
                            if o is not None:
                                self.print(f"Function {modular_id} On start result: {o}")
                    else:
                        self.logger.warning(f"starting function not found {e}")
                except Exception as e:
                    self.logger.debug(Style.YELLOW(
                        Style.Bold(f"modular:{modular_id}.{f} on_start error {i}/{len(on_start)} -> {e}")))
                    self.debug_rains(e)
                finally:
                    i += 1
        return instance if tools_class is None else tools_class

    def save_initialized_module(self, tools_class, spec):
        """Instantiate *tools_class* bound to this app under *spec*."""
        tools_class.spec = spec
        return tools_class(app=self)

    def mod_online(self, mod_name, installed=False):
        """Return whether *mod_name* is loaded; optionally load it first."""
        loaded = mod_name in self.functions
        if installed and not loaded:
            self.save_load(mod_name)
            loaded = mod_name in self.functions
        return loaded

    def _get_function(self,
                      name: Enum or None,
                      state: bool = True,
                      specification: str = "app",
                      metadata=False, as_str: tuple or None = None, r=0, **kwargs):
        """Resolve a registered function, optionally bound to a live instance.

        Args:
            name: Enum (with NAME/value), (mod, func) pair, or None when
                *as_str* is given.
            state: True returns a stateful callable bound to the mod
                instance; False returns the raw (stateless) function. The
                function's own registered ``state`` flag overrides this.
            specification: Spec key whose instance should be bound.
            metadata: When True, return ``(function_data, callable)``.
            as_str: Explicit ``(modular_id, function_id)`` pair.
            r: Internal recursion guard -- one lazy-load retry only.
            **kwargs: ``i`` selects an entry when the registry stores a
                list of function ids.

        Returns:
            ``(callable_or_data, 0)`` on success, or an error pair such as
            ``("404", 404)`` / ``("404", 301)`` / ``("404", 400)``.
        """

        if as_str is None and isinstance(name, Enum):
            modular_id = str(name.NAME.value)
            function_id = str(name.value)
        elif as_str is None and isinstance(name, list):
            modular_id, function_id = name[0], name[1]
        else:
            modular_id, function_id = as_str

        self.logger.info(f"getting function : {specification}.{modular_id}.{function_id}")

        if modular_id not in self.functions:
            # Unknown mod: lazy-load it once (r guards against recursion).
            if r == 0:
                self.save_load(modular_id, spec=specification)
                return self.get_function(name=(modular_id, function_id),
                                         state=state,
                                         specification=specification,
                                         metadata=metadata,
                                         r=1)
            self.logger.warning(f"function modular not found {modular_id} 404")
            return "404", 404

        if function_id not in self.functions[modular_id]:
            self.logger.warning(f"function data not found {modular_id}.{function_id} 404")
            return "404", 404

        function_data = self.functions[modular_id][function_id]

        if isinstance(function_data, list):
            # A list entry is an alias set; kwargs['i'] picks one (default last).
            print(f"functions {function_id} : {function_data}")
            function_data = self.functions[modular_id][function_data[kwargs.get('i', -1)]]
            print(f"functions {modular_id} : {function_data}")
        function = function_data.get("func")
        params = function_data.get("params")

        # The function's own registered state flag wins over the caller's.
        state_ = function_data.get("state")
        if state_ is not None and state != state_:
            state = state_

        if function is None:
            self.logger.warning("No function found")
            return "404", 404

        if params is None:
            self.logger.warning("No function (params) found")
            return "404", 301

        if metadata and not state:
            self.logger.info("returning metadata stateless")
            return (function_data, function), 0

        if not state:  # mens a stateless function
            self.logger.info("returning stateless function")
            return function, 0

        instance = self.functions[modular_id].get(f"{specification}_instance")

        # instance_type = self.functions[modular_id].get(f"{specification}_instance_type", "functions/class")

        # Functions whose first parameter is 'app' are bound to the app itself.
        if params[0] == 'app':
            instance = get_app(from_=f"fuction {specification}.{modular_id}.{function_id}")

        if instance is None and self.alive:
            # No live instance yet: load one for this spec and retry the lookup.
            self.inplace_load_instance(modular_id, spec=specification)
            instance = self.functions[modular_id].get(f"{specification}_instance")

        if instance is None:
            self.logger.warning("No live Instance found")
            return "404", 400

        # if instance_type.endswith("/BC"):  # for backwards compatibility  functions/class/BC old modules
        #     # returning as stateless
        #     # return "422", -1
        #     self.logger.info(
        #         f"returning stateless function, cant find tools class for state handling found {instance_type}")
        #     if metadata:
        #         self.logger.info(f"returning metadata stateless")
        #         return (function_data, function), 0
        #     return function, 0

        self.logger.info("wrapping in higher_order_function")

        self.logger.info(f"returned fuction {specification}.{modular_id}.{function_id}")
        # Bind the instance as the implicit first argument (stateful call).
        higher_order_function = partial(function, instance)

        if metadata:
            self.logger.info("returning metadata stateful")
            return (function_data, higher_order_function), 0

        self.logger.info("returning stateful function")
        return higher_order_function, 0

    def save_exit(self):
        """Persist the current debug flag to the config store before exit."""
        target = self.config_fh.file_handler_filename
        self.logger.info(f"save exiting saving data to {target} states of {self.debug=}")
        self.config_fh.add_to_save_file_handler(self.keys["debug"], str(self.debug))

    def init_mod(self, mod_name, spec='app'):
        """
        Initializes a module in a thread-safe manner by submitting the
        asynchronous initialization to the running event loop.
        """
        # Only the base module name (before any dot) is initialized.
        base_name = mod_name.split('.')[0] if '.' in mod_name else mod_name
        self.run_bg_task(self.a_init_mod, base_name, spec)

    def run_bg_task(self, task: Callable, *args, **kwargs) -> asyncio.Task | None:
        """
        Runs a coroutine in the background without blocking the caller.

        This is the primary method for "fire-and-forget" async tasks. It
        schedules the coroutine to run on the application's main event loop.
        Synchronous callables are delegated to run_bg_task_advanced.

        Bugfix: coroutine *objects* are not callable, so the original
        ``not callable(task)`` guard rejected them before the
        ``asyncio.iscoroutine`` branch could ever run; the guard now
        accepts coroutine objects as well.

        Args:
            task: The coroutine function (or coroutine object) to run.
            *args: Arguments to pass to a coroutine function.
            **kwargs: Keyword arguments to pass to a coroutine function.

        Returns:
            An asyncio.Task object representing the scheduled task, or None
            if the task could not be scheduled (or was delegated).
        """
        if not callable(task) and not asyncio.iscoroutine(task):
            self.logger.warning("Task passed to run_bg_task is not callable!")
            return None

        if not asyncio.iscoroutinefunction(task) and not asyncio.iscoroutine(task):
            self.logger.warning(f"Task '{getattr(task, '__name__', 'unknown')}' is not a coroutine. "
                                f"Use run_bg_task_advanced for synchronous functions.")
            # Fallback to advanced runner for convenience
            self.run_bg_task_advanced(task, *args, **kwargs)
            return None

        try:
            loop = self.loop_gard()
            if not loop.is_running():
                # Without a running main loop, delegate to the threaded runner.
                self.logger.info("Main event loop not running. Delegating to advanced background runner.")
                return self.run_bg_task_advanced(task, *args, **kwargs)

            # Build the coroutine object if a coroutine function was given.
            coro = task(*args, **kwargs) if asyncio.iscoroutinefunction(task) else task

            scheduled = loop.create_task(coro)

            def _report_exception(done_task: asyncio.Task):
                # Surface exceptions from fire-and-forget tasks in the log.
                if not done_task.cancelled() and done_task.exception():
                    self.logger.error(f"Exception in background task '{done_task.get_name()}':",
                                      exc_info=done_task.exception())

            scheduled.add_done_callback(_report_exception)
            self.bg_tasks.append(scheduled)
            return scheduled

        except Exception as e:
            self.logger.error(f"Failed to schedule background task: {e}", exc_info=True)
            return None

    def run_bg_task_advanced(self, task: Callable, *args, **kwargs) -> threading.Thread:
        """
        Runs a task in a separate, dedicated background thread with its own event loop.

        This is ideal for:
        1. Running an async task from a synchronous context.
        2. Launching a long-running, independent operation that should not
           interfere with the main application's event loop.

        Args:
            task: The function to run (can be sync or async).
            *args: Arguments for the task.
            **kwargs: Keyword arguments for the task.

        Returns:
            The threading.Thread object managing the background execution,
            or None when *task* is not callable.
        """
        if not callable(task):
            self.logger.warning("Task for run_bg_task_advanced is not callable!")
            return None

        def thread_target():
            # Each thread gets its own event loop.
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

            try:
                # Prepare the coroutine we need to run
                if asyncio.iscoroutinefunction(task):
                    coro = task(*args, **kwargs)
                elif asyncio.iscoroutine(task):
                    # It's already a coroutine object
                    coro = task
                else:
                    # It's a synchronous function, run it in an executor
                    # to avoid blocking the new event loop.
                    coro = loop.run_in_executor(None, lambda: task(*args, **kwargs))

                # Run the coroutine to completion
                result = loop.run_until_complete(coro)
                self.logger.debug(f"Advanced background task '{getattr(task, '__name__', 'unknown')}' completed.")
                if result is not None:
                    self.logger.debug(f"Task result: {str(result)[:100]}")

            except Exception as e:
                self.logger.error(f"Error in advanced background task '{getattr(task, '__name__', 'unknown')}':",
                                  exc_info=e)
            finally:
                # Cleanly shut down the event loop in this thread:
                # cancel whatever is still pending, let cancellations
                # propagate, then close the loop.
                try:
                    all_tasks = asyncio.all_tasks(loop=loop)
                    if all_tasks:
                        for t in all_tasks:
                            t.cancel()
                        loop.run_until_complete(asyncio.gather(*all_tasks, return_exceptions=True))
                finally:
                    loop.close()
                    asyncio.set_event_loop(None)

        # Create, start, and return the thread.
        # It's a daemon thread so it won't prevent the main app from exiting.
        t = threading.Thread(target=thread_target, daemon=True, name=f"BGTask-{getattr(task, '__name__', 'unknown')}")
        self.bg_tasks.append(t)
        t.start()
        return t

    # Helper method to wait for background tasks to complete (optional)
    # Helper method to wait for background tasks to complete (optional)
    def wait_for_bg_tasks(self, timeout=None):
        """
        Wait for all background tasks to complete.

        Bugfixes: ``self.bg_tasks`` holds both threading.Thread objects
        (from run_bg_task_advanced) and asyncio.Task objects (from
        run_bg_task); the latter have no ``is_alive()`` and previously
        crashed this method. Also, *timeout* is now a total budget shared
        across all tasks (as documented) rather than a per-task allowance.

        Args:
            timeout: Maximum total time to wait (in seconds) for all tasks
                     to complete. None means wait indefinitely.

        Returns:
            bool: True if all tasks completed, False if the timeout ran out
            or an asyncio task is still pending.
        """
        deadline = None if timeout is None else time.monotonic() + timeout

        for task in list(self.bg_tasks):
            if isinstance(task, threading.Thread):
                if not task.is_alive():
                    continue
                remaining = None if deadline is None else max(0.0, deadline - time.monotonic())
                task.join(timeout=remaining)
                if task.is_alive():
                    return False
            elif isinstance(task, asyncio.Task):
                # Blocking on an asyncio task from sync code is unsafe here;
                # a still-pending task is reported as incomplete.
                if not task.done():
                    return False

        return True

    def __call__(self, *args, **kwargs):
        """Make the app instance callable; delegates directly to :meth:`run`."""
        return self.run(*args, **kwargs)

    def run(self, *args, request=None, running_function_coro=None, **kwargs):
        """
        Run a function with support for SSE streaming in both
        threaded and non-threaded contexts.

        Args:
            *args: When ``running_function_coro`` is None, ``args[0]`` must be a
                ``(module_name, function_name)`` tuple forwarded to ``a_run_any``.
            request: Optional request payload (dict) converted to ``RequestData``
                when the registered function asked for ``request_as_kwarg``.
            running_function_coro: Pre-built coroutine to execute instead of
                resolving a function via ``a_run_any``.
            **kwargs: Forwarded to the target function.

        Returns:
            For ``Result`` values: the streaming ``Result`` itself when it wraps
            a stream, otherwise ``to_api_result().model_dump(mode='json')``;
            any non-``Result`` value is returned unchanged.
        """
        if running_function_coro is None:
            mn, fn = args[0]
            # Inject the deserialized request when the function was registered
            # with request_as_kwarg. 'data'/'form_data' kwargs the function does
            # not declare as params are folded into the request body instead.
            if self.functions.get(mn, {}).get(fn, {}).get('request_as_kwarg', False):
                kwargs["request"] = RequestData.from_dict(request)
                if 'data' in kwargs and 'data' not in self.functions.get(mn, {}).get(fn, {}).get('params', []):
                    kwargs["request"].data = kwargs["request"].body = kwargs['data']
                    del kwargs['data']
                if 'form_data' in kwargs and 'form_data' not in self.functions.get(mn, {}).get(fn, {}).get('params',
                                                                                                           []):
                    kwargs["request"].form_data = kwargs["request"].body = kwargs['form_data']
                    del kwargs['form_data']

        # Create the coroutine
        coro = running_function_coro or self.a_run_any(*args, **kwargs)

        # Get or create an event loop
        try:
            loop = asyncio.get_event_loop()
            is_running = loop.is_running()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            is_running = False

        # If the loop is already running, run in a separate thread
        if is_running:
            # Create thread pool executor as needed (shared, class-level)
            if not hasattr(self.__class__, '_executor'):
                self.__class__._executor = ThreadPoolExecutor(max_workers=4)

            def run_in_new_thread():
                # Set up a new loop in this thread
                new_loop = asyncio.new_event_loop()
                asyncio.set_event_loop(new_loop)

                try:
                    # Run the coroutine
                    return new_loop.run_until_complete(coro)
                finally:
                    new_loop.close()

            # Run in thread and block the caller until the result is available
            thread_result = self.__class__._executor.submit(run_in_new_thread).result()

            # Handle streaming results from thread
            if isinstance(thread_result, dict) and thread_result.get("is_stream"):
                # Create a new SSE stream in the main thread
                async def stream_from_function():
                    # Re-run the function with direct async access
                    # NOTE(review): this executes the target a second time to
                    # obtain a generator usable on this loop — confirm stream
                    # endpoints are safe to invoke twice.
                    stream_result = await self.a_run_any(*args, **kwargs)

                    if (isinstance(stream_result, Result) and
                        getattr(stream_result.result, 'data_type', None) == "stream"):
                        # Get and forward data from the original generator
                        original_gen = stream_result.result.data.get("generator")
                        if inspect.isasyncgen(original_gen):
                            async for item in original_gen:
                                yield item

                # Return a new streaming Result
                return Result.stream(
                    stream_generator=stream_from_function(),
                    headers=thread_result.get("headers", {})
                )

            result = thread_result
        else:
            # Direct execution when loop is not running
            result = loop.run_until_complete(coro)

        # Process the final result
        if isinstance(result, Result):
            if 'debug' in self.id:
                result.print()
            if getattr(result.result, 'data_type', None) == "stream":
                return result
            return result.to_api_result().model_dump(mode='json')

        return result

    def loop_gard(self):
        """Return ``self.loop``, (re)binding it to a usable event loop first.

        Triggers ``self._start_event_loop()`` when no loop is bound yet, and
        rebinds via ``asyncio.get_event_loop()`` whenever the cached loop is
        missing or has been closed.
        """
        if self.loop is None:
            self._start_event_loop()
            self.loop = asyncio.get_event_loop()
        needs_rebind = self.loop.is_closed()
        if needs_rebind:
            self.loop = asyncio.get_event_loop()
        return self.loop

    async def a_init_mod(self, mod_name, spec='app'):
        """Load a module via :meth:`save_load` and await its async init if pending.

        The module is awaited only when it carries an ``__initobj`` marker and
        its ``async_initialized`` flag is still False.
        """
        module = self.save_load(mod_name, spec=spec)
        pending_init = hasattr(module, "__initobj") and not module.async_initialized
        if pending_init:
            await module
        return module


    def load_mod(self, mod_name: str, mlm='I', **kwargs):
        """Load a module using the given load mode.

        Args:
            mod_name: Name of the module to load.
            mlm: Load mode; only 'I' (inplace) and 'C' (copy) are wired up.
            **kwargs: Forwarded to the selected loader.

        Returns:
            The loader's return value, or ``Result.default_internal_error``
            when loading fails for any reason.
        """
        action_list_helper = ['I (inplace load dill on error python)',
                              # 'C (coppy py file to runtime dir)',
                              # 'S (save py file to dill)',
                              # 'CS (coppy and save py file)',
                              # 'D (development mode, inplace load py file)'
                              ]
        action_list = {"I": lambda: self.inplace_load_instance(mod_name, **kwargs),
                       "C": lambda: self._copy_load(mod_name, **kwargs)
                       }

        try:
            if mlm in action_list:

                return action_list.get(mlm)()
            else:
                self.logger.critical(
                    f"config mlm must be {' or '.join(action_list_helper)} is {mlm=}")
                # NOTE(review): this ValueError is raised inside the try and
                # immediately caught by the handler below, so an unknown mlm
                # logs a warning and falls through to the internal-error Result
                # instead of propagating.
                raise ValueError(f"config mlm must be {' or '.join(action_list_helper)} is {mlm=}")
        except ValueError as e:
            self.logger.warning(Style.YELLOW(f"Error Loading Module '{mod_name}', with error :{e}"))
            self.debug_rains(e)
        except ImportError as e:
            self.logger.error(Style.YELLOW(f"Error Loading Module '{mod_name}', with error :{e}"))
            self.debug_rains(e)
        except Exception as e:
            self.logger.critical(Style.RED(f"Error Loading Module '{mod_name}', with critical error :{e}"))
            print(Style.RED(f"Error Loading Module '{mod_name}'"))
            self.debug_rains(e)

        return Result.default_internal_error(info="info's in logs.")

    async def load_external_mods(self):
        """Load every mods folder listed in the ``EXTERNAL_PATH_RUNNABLE`` env var.

        The variable holds a comma-separated list of paths; empty entries are
        skipped.
        """
        raw = os.getenv("EXTERNAL_PATH_RUNNABLE", '')
        for path in filter(None, raw.split(',')):
            await self.load_all_mods_in_file(path)

    async def load_all_mods_in_file(self, working_dir="mods"):
        """Concurrently load every not-yet-open module found in *working_dir*.

        Modules are discovered with :meth:`get_all_mods`, already-open modules
        (keys of ``self.functions``) are skipped, and the rest are loaded via
        :meth:`save_load` in worker threads.

        Args:
            working_dir: Folder to scan for module files (default ``"mods"``).

        Returns:
            str: Summary line "Opened <n> modules in <t>s".
        """
        print(f"LOADING ALL MODS FROM FOLDER : {working_dir}")
        t0 = time.perf_counter()
        # Get the list of all modules, then drop those already open.
        module_list = self.get_all_mods(working_dir)
        open_modules = self.functions.keys()
        start_len = len(open_modules)

        for om in open_modules:
            if om in module_list:
                module_list.remove(om)

        # Load each module in a worker thread so the event loop stays free.
        tasks = {
            asyncio.create_task(asyncio.to_thread(self.save_load, mod, 'app'))
            for mod in module_list
        }
        for t in asyncio.as_completed(tasks):
            try:
                result = await t
                if hasattr(result, 'Name'):
                    print('Opened :', result.Name)
                elif hasattr(result, 'name'):
                    if hasattr(result, 'async_initialized'):
                        if not result.async_initialized:
                            # Bind `result` as a default argument: the loop
                            # rebinds `result` every iteration, so a plain
                            # closure would await whichever module finished
                            # last (classic late-binding bug).
                            async def _(result=result):
                                try:
                                    if asyncio.iscoroutine(result):
                                        await result
                                    if hasattr(result, 'Name'):
                                        print('Opened :', result.Name)
                                    elif hasattr(result, 'name'):
                                        print('Opened :', result.name)
                                except Exception as e:
                                    self.debug_rains(e)
                                    if hasattr(result, 'Name'):
                                        print('Error opening :', result.Name)
                                    elif hasattr(result, 'name'):
                                        print('Error opening :', result.name)
                            # NOTE(review): fire-and-forget task; the reference
                            # is not retained, so it could be GC'd before it
                            # runs — confirm whether init completion matters.
                            asyncio.create_task(_())
                        else:
                            print('Opened :', result.name)
                else:
                    print('Opened :', result)
            except Exception as e:
                self.logger.error(Style.RED(f"An Error occurred while opening all modules error: {str(e)}"))
                self.debug_rains(e)
        opened = len(self.functions.keys()) - start_len

        self.logger.info(f"Opened {opened} modules in {time.perf_counter() - t0:.2f}s")
        return f"Opened {opened} modules in {time.perf_counter() - t0:.2f}s"

    def get_all_mods(self, working_dir="mods", path_to="./runtime", use_wd=True):
        """Collect candidate module names from a mods directory.

        Resolves the directory (dev suffix, default mods folder, or the
        per-instance runtime lib when ``use_wd`` is False), filters out
        ``mainTool`` files, dunder/hidden entries and ``test_`` files, and
        strips a trailing ``.py`` from each remaining name.
        """
        self.logger.info(f"collating all mods in working directory {working_dir}")

        suffix = "_dev" if self.dev_modi else ""
        if not use_wd:
            instance_dir = self.id.replace(".", "_")
            working_dir = f"{path_to}/{instance_dir}/mod_lib{suffix}/"
        elif working_dir == "mods":
            working_dir = f"{self.start_dir}/mods{suffix}"

        entries = os.listdir(working_dir)
        self.logger.info(f"found : {len(entries)} files")

        def _is_candidate(entry: str) -> bool:
            # Skip legacy mainTool files, dunder/hidden entries and tests.
            if "mainTool" in entry or entry.startswith(("__", ".")):
                return False
            return not entry.startswith("test_")

        def _strip_py(entry: str) -> str:
            return entry[:-3] if entry.endswith(".py") else entry

        mods_list = [_strip_py(entry) for entry in entries if _is_candidate(entry)]

        self.logger.info(f"found : {len(mods_list)} Modules")
        return mods_list

    def remove_all_modules(self, delete=False):
        """Close every currently loaded module via :meth:`remove_mod`.

        Args:
            delete: Forwarded to :meth:`remove_mod`; when True each module's
                registry entry is removed entirely.
        """
        for module_name in list(self.functions.keys()):
            self.logger.info(f"closing: {module_name}")
            self.remove_mod(module_name, delete=delete)

    def remove_mod(self, mod_name, spec='app', delete=True):
        """Synchronously close a loaded module and optionally drop its registry entry.

        Runs the module's registered ``on_exit`` functions (async hooks are only
        queued on ``self.exit_tasks``, not awaited here), removes the spec-bound
        instance, and — when *delete* is True — deletes the whole
        ``self.functions[mod_name]`` entry.

        Args:
            mod_name: Module to close; a no-op if it is not active.
            spec: Instance specification whose instance entries are removed.
            delete: When True, remove the module's function registry entry.
        """
        if mod_name not in self.functions:
            self.logger.info(f"mod not active {mod_name}")
            return

        on_exit = self.functions[mod_name].get("on_exit")
        self.logger.info(f"closing: {on_exit}")
        def helper():
            # Drop the spec-bound instance and its type marker from the registry.
            if f"{spec}_instance" in self.functions[mod_name]:
                del self.functions[mod_name][f"{spec}_instance"]
            if f"{spec}_instance_type" in self.functions[mod_name]:
                del self.functions[mod_name][f"{spec}_instance_type"]

        # Fallback: instances typed "*/BC" expose on_exit on the instance itself
        # rather than via registered on_exit functions.
        if on_exit is None and self.functions[mod_name].get(f"{spec}_instance_type", "").endswith("/BC"):
            instance = self.functions[mod_name].get(f"{spec}_instance", None)
            if instance is not None and hasattr(instance, 'on_exit'):
                if asyncio.iscoroutinefunction(instance.on_exit):
                    # Sync context: defer the async hook to the exit task queue.
                    self.exit_tasks.append(instance.on_exit)
                else:
                    instance.on_exit()

        if on_exit is None and delete:
            self.functions[mod_name] = {}
            del self.functions[mod_name]
            return
        if on_exit is None:
            helper()
            return

        i = 1

        for j, f in enumerate(on_exit):
            try:
                f_, e = self.get_function((mod_name, f), state=True, specification=spec, i=j)
                if e == 0:
                    self.logger.info(Style.GREY(f"Running On exit {f} {i}/{len(on_exit)}"))
                    if asyncio.iscoroutinefunction(f_):
                        # Cannot await here; queue the coroutine function for a_exit.
                        self.exit_tasks.append(f_)
                        o = None
                    else:
                        o = f_()
                    if o is not None:
                        self.print(f"Function On Exit result: {o}")
                else:
                    self.logger.warning("closing function not found")
            except Exception as e:
                self.logger.debug(
                    Style.YELLOW(Style.Bold(f"modular:{mod_name}.{f} on_exit error {i}/{len(on_exit)} -> {e}")))

                self.debug_rains(e)
            finally:
                i += 1

        helper()

        if delete:
            self.functions[mod_name] = {}
            del self.functions[mod_name]

    async def a_remove_all_modules(self, delete=False):
        """Asynchronously close every loaded module via :meth:`a_remove_mod`.

        Args:
            delete: Forwarded to :meth:`a_remove_mod`; when True each module's
                registry entry is removed entirely.
        """
        for module_name in list(self.functions.keys()):
            self.logger.info(f"closing: {module_name}")
            await self.a_remove_mod(module_name, delete=delete)

    async def a_remove_mod(self, mod_name, spec='app', delete=True):
        """Asynchronously close a loaded module and optionally drop its registry entry.

        Unlike :meth:`remove_mod`, async ``on_exit`` hooks are awaited directly.
        Entries in ``on_exit`` may be function names (resolved via
        :meth:`get_function`) or callables.

        Args:
            mod_name: Module to close; a no-op if it is not active.
            spec: Instance specification whose instance entries are removed.
            delete: When True, remove the module's function registry entry.
        """
        if mod_name not in self.functions:
            self.logger.info(f"mod not active {mod_name}")
            return
        on_exit = self.functions[mod_name].get("on_exit")
        self.logger.info(f"closing: {on_exit}")
        def helper():
            # Drop the spec-bound instance and its type marker from the registry.
            if f"{spec}_instance" in self.functions[mod_name]:
                del self.functions[mod_name][f"{spec}_instance"]
            if f"{spec}_instance_type" in self.functions[mod_name]:
                del self.functions[mod_name][f"{spec}_instance_type"]

        # Fallback: instances typed "*/BC" expose on_exit on the instance itself.
        if on_exit is None and self.functions[mod_name].get(f"{spec}_instance_type", "").endswith("/BC"):
            instance = self.functions[mod_name].get(f"{spec}_instance", None)
            if instance is not None and hasattr(instance, 'on_exit'):
                if asyncio.iscoroutinefunction(instance.on_exit):
                    await instance.on_exit()
                else:
                    instance.on_exit()

        if on_exit is None and delete:
            self.functions[mod_name] = {}
            del self.functions[mod_name]
            return
        if on_exit is None:
            helper()
            return

        i = 1
        for f in on_exit:
            try:
                # e == 0 marks a resolved callable; anything else is "not found".
                e = 1
                if isinstance(f, str):
                    f_, e = self.get_function((mod_name, f), state=True, specification=spec)
                elif isinstance(f, Callable):
                    f_, e, f  = f, 0, f.__name__
                # NOTE(review): if f is neither str nor Callable, f_ stays
                # unbound and e stays 1, so the "not found" warning is logged.
                if e == 0:
                    self.logger.info(Style.GREY(f"Running On exit {f} {i}/{len(on_exit)}"))
                    if asyncio.iscoroutinefunction(f_):
                        o = await f_()
                    else:
                        o = f_()
                    if o is not None:
                        self.print(f"Function On Exit result: {o}")
                else:
                    self.logger.warning("closing function not found")
            except Exception as e:
                self.logger.debug(
                    Style.YELLOW(Style.Bold(f"modular:{mod_name}.{f} on_exit error {i}/{len(on_exit)} -> {e}")))
                self.debug_rains(e)
            finally:
                i += 1

        helper()

        if delete:
            self.functions[mod_name] = {}
            del self.functions[mod_name]

    def exit(self, remove_all=True):
        """Synchronously shut the application down.

        Optionally closes all modules, persists state, joins non-main threads
        (when a daemon app is attached), and finally stops the bound event loop.

        Args:
            remove_all: When True, close all loaded modules first.
        """
        if not self.alive:
            # Already shut down — keep exit idempotent.
            return
        if self.args_sto.debug:
            self.hide_console()
        self.disconnect()
        if remove_all:
            self.remove_all_modules()
        self.logger.info("Exiting ToolBox interface")
        self.alive = False
        # Record that (and when) exit was requested.
        self.called_exit = True, time.time()
        self.save_exit()
        if hasattr(self, 'root_blob_storage') and self.root_blob_storage:
            self.root_blob_storage.exit()
        try:
            self.config_fh.save_file_handler()
        except SystemExit:
            print("If u ar testing this is fine else ...")

        if hasattr(self, 'daemon_app'):
            import threading

            # Join all non-main threads (newest first), each with a short timeout.
            for thread in threading.enumerate()[::-1]:
                if thread.name == "MainThread":
                    continue
                try:
                    with Spinner(f"closing Thread {thread.name:^50}|", symbols="s", count_down=True,
                                 time_in_s=0.751 if not self.debug else 0.6):
                        thread.join(timeout=0.751 if not self.debug else 0.6)
                except TimeoutError as e:
                    self.logger.error(f"Timeout error on exit {thread.name} {str(e)}")
                    print(str(e), f"Timeout {thread.name}")
                except KeyboardInterrupt:
                    print("Unsave Exit")
                    break
        if hasattr(self, 'loop') and self.loop is not None:
            with Spinner("closing Event loop:", symbols="+"):
                self.loop.stop()

    async def a_exit(self):
        """Asynchronous shutdown: close modules, await exit hooks, then exit.

        Runs every coroutine function queued in ``self.exit_tasks``
        concurrently, prints each result, and delegates to the synchronous
        :meth:`exit` without re-removing modules.
        """
        await self.a_remove_all_modules(delete=True)
        pending = [
            asyncio.create_task(hook())
            for hook in self.exit_tasks
            if asyncio.iscoroutinefunction(hook)
        ]
        for outcome in await asyncio.gather(*pending):
            self.print(f"Function On Exit result: {outcome}")
        self.exit(remove_all=False)

    def save_load(self, modname, spec='app'):
        """Load a module by name, returning False instead of raising.

        Args:
            modname: Module name to load; falsy values are rejected.
            spec: Instance specification forwarded to :meth:`load_mod`.

        Returns:
            The loaded module, or False when the name is empty or the module
            cannot be found.
        """
        self.logger.debug(f"Save load module {modname}")
        if modname:
            try:
                return self.load_mod(modname, spec=spec)
            except ModuleNotFoundError as e:
                self.logger.error(Style.RED(f"Module {modname} not found"))
                self.debug_rains(e)
        else:
            self.logger.warning("no filename specified")
        return False

    def get_function(self, name: Enum or tuple, **kwargs):
        """
        Resolve a registered function by name (Enum member or (mod, fn) tuple).

        Kwargs for _get_function
            metadata:: return the registered function dictionary
                stateless: (function_data, None), 0
                stateful: (function_data, higher_order_function), 0
            state::boolean
                specification::str default app
        """
        if not isinstance(name, tuple):
            return self._get_function(name, **kwargs)
        # Tuple form: pass the (module, function) pair through as_str.
        return self._get_function(None, as_str=name, **kwargs)

    async def a_run_function(self, mod_function_name: Enum or tuple,
                             tb_run_function_with_state=True,
                             tb_run_with_specification='app',
                             args_=None,
                             kwargs_=None,
                             *args,
                             **kwargs) -> Result:
        """Resolve and execute a module function asynchronously.

        Args:
            mod_function_name: (module, function) tuple/list, or an Enum member
                whose class carries the module name in ``NAME``.
            tb_run_function_with_state: Resolve the stateful (instance-bound)
                variant of the function.
            tb_run_with_specification: Instance specification; the special
                value 'ws_internal' dispatches to registered WebSocket handlers.
            args_: Alternative positional args (used only when *args is empty).
            kwargs_: Alternative keyword args (used only when **kwargs is empty).

        Returns:
            Result: The function outcome, or an error Result when the function
            cannot be resolved.

        Raises:
            TypeError: If *mod_function_name* has an unsupported type.
        """
        if kwargs_ is not None and not kwargs:
            kwargs = kwargs_
        if args_ is not None and not args:
            args = args_
        if isinstance(mod_function_name, tuple):
            modular_name, function_name = mod_function_name
        elif isinstance(mod_function_name, list):
            modular_name, function_name = mod_function_name[0], mod_function_name[1]
        elif isinstance(mod_function_name, Enum):
            modular_name, function_name = mod_function_name.__class__.NAME.value, mod_function_name.value
        else:
            raise TypeError("Unknown function type")

        if tb_run_with_specification == 'ws_internal':
            # WebSocket dispatch: mod_function_name is (handler_id, event_name).
            modular_name = modular_name.split('/')[0]
            if not self.mod_online(modular_name, installed=True):
                self.get_mod(modular_name)
            handler_id, event_name = mod_function_name
            if handler_id in self.websocket_handlers and event_name in self.websocket_handlers[handler_id]:
                handler_func = self.websocket_handlers[handler_id][event_name]
                try:
                    # Execute the asynchronous handler
                    if inspect.iscoroutinefunction(handler_func):
                        await handler_func(self, **kwargs)
                    else:
                        handler_func(self, **kwargs)  # for synchronous handlers
                    return Result.ok(info=f"WS handler '{event_name}' executed.")
                except Exception as e:
                    self.logger.error(f"Error in WebSocket handler '{handler_id}/{event_name}': {e}", exc_info=True)
                    return Result.default_internal_error(info=str(e))
            else:
                # No handler registered — not an error (e.g. on_connect is optional)
                return Result.ok(info=f"No WS handler for '{event_name}'.")

        if not self.mod_online(modular_name, installed=True):
            self.get_mod(modular_name)

        function_data, error_code = self.get_function(mod_function_name, state=tb_run_function_with_state,
                                                      metadata=True, specification=tb_run_with_specification)
        self.logger.info(f"Received fuction : {mod_function_name}, with execode: {error_code}")
        if error_code == 404:
            # Module not resolved yet: load it (awaiting async init) and retry once.
            mod = self.get_mod(modular_name)
            if hasattr(mod, "async_initialized") and not mod.async_initialized:
                await mod
            function_data, error_code = self.get_function(mod_function_name, state=tb_run_function_with_state,
                                                          metadata=True, specification=tb_run_with_specification)

        if error_code == 404:
            self.logger.warning(Style.RED("Function Not Found"))
            return (Result.default_user_error(interface=self.interface_type,
                                              exec_code=404,
                                              info="function not found function is not decorated").
                    set_origin(mod_function_name))

        if error_code == 300:
            return Result.default_internal_error(interface=self.interface_type,
                                                 info=f"module {modular_name}"
                                                      f" has no state (instance)").set_origin(mod_function_name)

        if error_code != 0:
            return Result.default_internal_error(interface=self.interface_type,
                                                 exec_code=error_code,
                                                 info=f"Internal error"
                                                      f" {modular_name}."
                                                      f"{function_name}").set_origin(mod_function_name)

        if not tb_run_function_with_state:
            function_data, _ = function_data
            function = function_data.get('func')
        else:
            function_data, function = function_data

        if not function:
            self.logger.warning(Style.RED(f"Function {function_name} not found"))
            return Result.default_internal_error(interface=self.interface_type,
                                                 exec_code=404,
                                                 info="function not found function").set_origin(mod_function_name)

        self.logger.info("Profiling function")
        t0 = time.perf_counter()
        # Dispatch to the async or sync runner depending on the target function.
        if asyncio.iscoroutinefunction(function):
            return await self.a_fuction_runner(function, function_data, args, kwargs, t0)
        else:
            return self.fuction_runner(function, function_data, args, kwargs, t0)


    def run_function(self, mod_function_name: Enum or tuple,
                     tb_run_function_with_state=True,
                     tb_run_with_specification='app',
                     args_=None,
                     kwargs_=None,
                     *args,
                     **kwargs) -> Result:
        """Resolve and execute a module function synchronously.

        Mirrors :meth:`a_run_function` but cannot await: async WS handlers are
        driven via ``self.loop.run_until_complete`` and async module functions
        raise ``ValueError`` (use ``a_run_any`` for those).

        Args:
            mod_function_name: (module, function) tuple/list, or an Enum member
                whose class carries the module name in ``NAME``.
            tb_run_function_with_state: Resolve the stateful (instance-bound)
                variant of the function.
            tb_run_with_specification: Instance specification; the special
                value 'ws_internal' dispatches to registered WebSocket handlers.
            args_: Alternative positional args (used only when *args is empty).
            kwargs_: Alternative keyword args (used only when **kwargs is empty).

        Returns:
            Result: The function outcome, or an error Result when the function
            cannot be resolved.

        Raises:
            TypeError: If *mod_function_name* has an unsupported type.
            ValueError: If the resolved function is a coroutine function.
        """
        if kwargs_ is not None and not kwargs:
            kwargs = kwargs_
        if args_ is not None and not args:
            args = args_
        if isinstance(mod_function_name, tuple):
            modular_name, function_name = mod_function_name
        elif isinstance(mod_function_name, list):
            modular_name, function_name = mod_function_name[0], mod_function_name[1]
        elif isinstance(mod_function_name, Enum):
            modular_name, function_name = mod_function_name.__class__.NAME.value, mod_function_name.value
        else:
            raise TypeError("Unknown function type")

        if not self.mod_online(modular_name, installed=True):
            self.get_mod(modular_name)

        if tb_run_with_specification == 'ws_internal':
            # WebSocket dispatch: mod_function_name is (handler_id, event_name).
            handler_id, event_name = mod_function_name
            if handler_id in self.websocket_handlers and event_name in self.websocket_handlers[handler_id]:
                handler_func = self.websocket_handlers[handler_id][event_name]
                try:
                    # Execute the asynchronous handler on the bound loop
                    if inspect.iscoroutinefunction(handler_func):
                        return self.loop.run_until_complete(handler_func(self, **kwargs))
                    else:
                        handler_func(self, **kwargs)  # for synchronous handlers
                    return Result.ok(info=f"WS handler '{event_name}' executed.")
                except Exception as e:
                    self.logger.error(f"Error in WebSocket handler '{handler_id}/{event_name}': {e}", exc_info=True)
                    return Result.default_internal_error(info=str(e))
            else:
                # No handler registered — not an error (e.g. on_connect is optional)
                return Result.ok(info=f"No WS handler for '{event_name}'.")

        function_data, error_code = self.get_function(mod_function_name, state=tb_run_function_with_state,
                                                      metadata=True, specification=tb_run_with_specification)
        self.logger.info(f"Received fuction : {mod_function_name}, with execode: {error_code}")
        # NOTE(review): these numeric codes (1/3/400 retry, 2 not-found, -1
        # no-state) differ from the async variant's (404 retry/not-found, 300
        # no-state) — confirm which scheme _get_function actually emits.
        if error_code == 1 or error_code == 3 or error_code == 400:
            self.get_mod(modular_name)
            function_data, error_code = self.get_function(mod_function_name, state=tb_run_function_with_state,
                                                          metadata=True, specification=tb_run_with_specification)

        if error_code == 2:
            self.logger.warning(Style.RED("Function Not Found"))
            return (Result.default_user_error(interface=self.interface_type,
                                              exec_code=404,
                                              info="function not found function is not decorated").
                    set_origin(mod_function_name))

        if error_code == -1:
            return Result.default_internal_error(interface=self.interface_type,
                                                 info=f"module {modular_name}"
                                                      f" has no state (instance)").set_origin(mod_function_name)

        if error_code != 0:
            return Result.default_internal_error(interface=self.interface_type,
                                                 exec_code=error_code,
                                                 info=f"Internal error"
                                                      f" {modular_name}."
                                                      f"{function_name}").set_origin(mod_function_name)

        if not tb_run_function_with_state:
            function_data, _ = function_data
            function = function_data.get('func')
        else:
            function_data, function = function_data

        if not function:
            self.logger.warning(Style.RED(f"Function {function_name} not found"))
            return Result.default_internal_error(interface=self.interface_type,
                                                 exec_code=404,
                                                 info="function not found function").set_origin(mod_function_name)

        self.logger.info("Profiling function")
        t0 = time.perf_counter()
        if asyncio.iscoroutinefunction(function):
            raise ValueError(f"Fuction {function_name} is Async use a_run_any")
        else:
            return self.fuction_runner(function, function_data, args, kwargs, t0)

    def run_a_from_sync(self, function, *args, **kwargs):
        """Execute async *function* from synchronous code and return its result.

        If the bound loop is already running, the coroutine is executed on a
        fresh event loop inside a helper thread (blocking until done);
        otherwise it is scheduled and driven directly on the bound loop.
        """
        # Lazily bind self.loop: prefer the currently running loop, else a new one.
        if self.loop is None:
            try:
                self.loop = asyncio.get_running_loop()
            except RuntimeError:
                self.loop = asyncio.new_event_loop()

        if not self.loop.is_running():
            # Loop is idle: schedule the coroutine and drive it to completion here.
            pending = self.loop.create_task(function(*args, **kwargs))
            return self.loop.run_until_complete(pending)

        # Loop is busy: run the coroutine on a dedicated loop in a worker thread.
        outcome = Future()

        def _worker():
            side_loop = asyncio.new_event_loop()
            asyncio.set_event_loop(side_loop)
            try:
                outcome.set_result(side_loop.run_until_complete(function(*args, **kwargs)))
            except Exception as exc:
                outcome.set_exception(exc)
            finally:
                side_loop.close()

        worker = threading.Thread(target=_worker)
        worker.start()
        worker.join()  # Block until the thread completes.
        return outcome.result()

    def fuction_runner(self, function, function_data: dict, args: list, kwargs: dict, t0=.0):
        """Invoke a resolved synchronous function and wrap its outcome.

        Chooses a call shape by comparing the registered parameter count with
        the supplied args/kwargs, then normalizes the return value into a
        ``Result`` (unless the function was registered with row mode).

        Args:
            function: The callable to execute.
            function_data: Registry metadata ('params', 'module_name',
                'func_name', 'row').
            args: Positional arguments for the call.
            kwargs: Keyword arguments for the call.
            t0: perf_counter timestamp used for duration logging.

        Returns:
            A ``Result``/``ApiResult`` (errors wrapped as internal-error
            Results) or, for row-mode functions, the raw return value.
        """
        parameters = function_data.get('params')
        modular_name = function_data.get('module_name')
        function_name = function_data.get('func_name')
        row = function_data.get('row')
        mod_function_name = f"{modular_name}.{function_name}"

        # 'self' among the registered params means the bound instance consumes one slot.
        if_self_state = 1 if 'self' in parameters else 0

        try:
            # Heuristic dispatch: match registered arity against what was supplied
            # (no args, positional-only, keyword-only, or both).
            if len(parameters) == 0:
                res = function()
            elif len(parameters) == len(args) + if_self_state:
                res = function(*args)
            elif len(parameters) == len(kwargs.keys()) + if_self_state:
                res = function(**kwargs)
            else:
                res = function(*args, **kwargs)
            self.logger.info(f"Execution done in {time.perf_counter()-t0:.4f}")
            if isinstance(res, Result):
                formatted_result = res
                if formatted_result.origin is None:
                    formatted_result.set_origin(mod_function_name)
            elif isinstance(res, ApiResult):
                formatted_result = res
                if formatted_result.origin is None:
                    formatted_result.as_result().set_origin(mod_function_name).to_api_result()
            elif row:
                # Row mode: hand back the raw value untouched.
                formatted_result = res
            else:
                # Wrap the result in a Result object
                formatted_result = Result.ok(
                    interface=self.interface_type,
                    data_info="Auto generated result",
                    data=res,
                    info="Function executed successfully"
                ).set_origin(mod_function_name)
            if not row:
                self.logger.info(
                    f"Function Exec code: {formatted_result.info.exec_code} Info's: {formatted_result.info.help_text}")
            else:
                self.logger.info(
                    f"Function Exec data: {formatted_result}")
        except Exception as e:
            self.logger.error(
                Style.YELLOW(Style.Bold(
                    f"! Function ERROR: in {modular_name}.{function_name}")))
            # Wrap the exception in a Result object
            formatted_result = Result.default_internal_error(info=str(e)).set_origin(mod_function_name)
            # res = formatted_result
            self.logger.error(
                f"Function {modular_name}.{function_name}"
                f" executed wit an error {str(e)}, {type(e)}")
            self.debug_rains(e)
            self.print(f"! Function ERROR: in {modular_name}.{function_name} ")



        else:
            self.print_ok()

            self.logger.info(
                f"Function {modular_name}.{function_name}"
                f" executed successfully")

        return formatted_result

    async def a_fuction_runner(self, function, function_data: dict, args: list, kwargs: dict, t0=.0):
        """Await ``function`` with best-effort argument dispatch and normalize the outcome.

        Async counterpart of the synchronous runner: selects a call form (no
        arguments, positional only, keyword only, or both) by comparing the
        declared parameter count against the supplied args/kwargs, then wraps
        the awaited value in a ``Result`` unless the function is registered as
        ``row`` (raw) or already returned a ``Result``/``ApiResult``.  Any
        exception is converted into ``Result.default_internal_error``.

        Args:
            function: The coroutine function (or awaitable-returning callable) to run.
            function_data: Registry entry for the function; ``params``,
                ``module_name``, ``func_name`` and ``row`` are read here.
            args: Positional arguments for the call.
            kwargs: Keyword arguments for the call.
            t0: ``time.perf_counter()`` start timestamp used for the duration log.

        Returns:
            The normalized ``Result``/``ApiResult``, or the raw value when ``row`` is set.
        """

        parameters = function_data.get('params')
        modular_name = function_data.get('module_name')
        function_name = function_data.get('func_name')
        row = function_data.get('row')
        mod_function_name = f"{modular_name}.{function_name}"

        # Bound methods carry an implicit ``self``; account for it when
        # matching the declared parameter count against the provided arguments.
        if_self_state = 1 if 'self' in parameters else 0

        try:
            if len(parameters) == 0:
                res = await function()
            elif len(parameters) == len(args) + if_self_state:
                res = await function(*args)
            elif len(parameters) == len(kwargs.keys()) + if_self_state:
                res = await function(**kwargs)
            else:
                res = await function(*args, **kwargs)
            self.logger.info(f"Execution done in {time.perf_counter()-t0:.4f}")
            if isinstance(res, Result):
                formatted_result = res
                if formatted_result.origin is None:
                    formatted_result.set_origin(mod_function_name)
            elif isinstance(res, ApiResult):
                formatted_result = res
                if formatted_result.origin is None:
                    # NOTE(review): the chained conversion's return value is
                    # discarded; this only sets the origin if as_result()
                    # shares state with the ApiResult — confirm, otherwise
                    # this line is a no-op.
                    formatted_result.as_result().set_origin(mod_function_name).to_api_result()
            elif row:
                # ``row`` functions return their payload untouched.
                formatted_result = res
            else:
                # Wrap the result in a Result object
                formatted_result = Result.ok(
                    interface=self.interface_type,
                    data_info="Auto generated result",
                    data=res,
                    info="Function executed successfully"
                ).set_origin(mod_function_name)
            if not row:
                self.logger.info(
                    f"Function Exec code: {formatted_result.info.exec_code} Info's: {formatted_result.info.help_text}")
            else:
                self.logger.info(
                    f"Function Exec data: {formatted_result}")
        except Exception as e:
            self.logger.error(
                Style.YELLOW(Style.Bold(
                    f"! Function ERROR: in {modular_name}.{function_name}")))
            # Wrap the exception in a Result object
            formatted_result = Result.default_internal_error(info=str(e)).set_origin(mod_function_name)
            # res = formatted_result
            self.logger.error(
                f"Function {modular_name}.{function_name}"
                f" executed wit an error {str(e)}, {type(e)}")
            self.debug_rains(e)

        else:
            # Success path only: report OK and log completion.
            self.print_ok()

            self.logger.info(
                f"Function {modular_name}.{function_name}"
                f" executed successfully")

        return formatted_result

    async def run_http(self, mod_function_name: Enum | str | tuple, function_name=None,
                       args_=None,
                       kwargs_=None, method="GET",
                       *args, **kwargs):
        """Call a remote module function over the session's HTTP API and decode the response.

        Resolves ``mod_function_name`` (tuple, list, Enum member, or a module
        string paired with ``function_name``) to a ``/api/<module>/<function>``
        URL, sends ``kwargs`` as the request body, and decodes the response by
        Content-Type: JSON first, then YAML, then XML, finally wrapping plain
        text in an internal-error dict.

        Args:
            mod_function_name: Module/function identifier in one of the supported forms.
            function_name: Function name when ``mod_function_name`` is the module string.
            args_: Optional query-string payload appended after ``?``.
            kwargs_: Alternative way to supply the request body.
            method: HTTP method, default ``"GET"``.

        Returns:
            The decoded response object, or an internal-error ``dict`` on failure.
        """
        if kwargs_ is not None and not kwargs:
            kwargs = kwargs_
        if args_ is not None and not args:
            args = args_

        modular_name = mod_function_name

        if isinstance(mod_function_name, str) and isinstance(function_name, str):
            mod_function_name = (mod_function_name, function_name)

        if isinstance(mod_function_name, tuple):
            modular_name, function_name = mod_function_name
        elif isinstance(mod_function_name, list):
            modular_name, function_name = mod_function_name[0], mod_function_name[1]
        elif isinstance(mod_function_name, Enum):
            modular_name, function_name = mod_function_name.__class__.NAME.value, mod_function_name.value

        # Fix: build the query string defensively — a non-str ``args_``
        # previously raised TypeError on ``'?' + args_``.
        query = ''
        if args_ is not None:
            query = '?' + (args_ if isinstance(args_, str) else str(args_))

        self.logger.info(f"getting function : {modular_name}.{function_name} from http {self.session.base}")
        r = await self.session.fetch(f"/api/{modular_name}/{function_name}{query}",
                                     data=kwargs, method=method)
        try:
            if not r:
                print("§ Session server Offline!", self.session.base)
                return Result.default_internal_error(info="Session fetch failed").as_dict()

            content_type = r.headers.get('Content-Type', '').lower()

            if 'application/json' in content_type:
                try:
                    return r.json()
                except Exception as e:
                    print(f"⚠ JSON decode error: {e}")
                    # Fallback to text if JSON decoding fails
                    text = r.text
            else:
                text = r.text

            # Some transports expose .text as a (possibly async) callable.
            if isinstance(text, Callable):
                if asyncio.iscoroutinefunction(text):
                    text = await text()
                else:
                    text = text()

            # Attempt YAML
            if 'yaml' in content_type or text.strip().startswith('---'):
                try:
                    import yaml
                    return yaml.safe_load(text)
                except Exception as e:
                    print(f"⚠ YAML decode error: {e}")

            # Attempt XML
            if 'xml' in content_type or text.strip().startswith('<?xml'):
                try:
                    import xmltodict
                    return xmltodict.parse(text)
                except Exception as e:
                    print(f"⚠ XML decode error: {e}")

            # Fallback: return plain text
            return Result.default_internal_error(data={'raw_text': text, 'content_type': content_type}).as_dict()

        except Exception as e:
            print("❌ Fatal error during API call:", e)
            self.debug_rains(e)
            # Fix: use the keyword form, consistent with every other
            # default_internal_error call site in this class.
            return Result.default_internal_error(info=str(e)).as_dict()

    def run_local(self, *args, **kwargs):
        """Synchronous alias for :meth:`run_any` (local, in-process execution)."""
        return self.run_any(*args, **kwargs)

    async def a_run_local(self, *args, **kwargs):
        """Async alias for :meth:`a_run_any` (local, in-process execution)."""
        return await self.a_run_any(*args, **kwargs)

    def run_any(self, mod_function_name: Enum | str | tuple, backwords_compability_variabel_string_holder=None,
                get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                kwargs_=None,
                *args, **kwargs):
        """Resolve and synchronously execute a registered module function.

        Args:
            mod_function_name: A ``(module, function)`` tuple, an Enum member,
                or a legacy dotted string ``"Module.function"``.
            backwords_compability_variabel_string_holder: Legacy second argument
                holding the function name when ``mod_function_name`` is the
                module string.
            get_results: When True, always return a ``Result``; otherwise
                return the unwrapped payload via ``Result.get()``.
            tb_run_function_with_state: Passed through to ``run_function``.
            tb_run_with_specification: Spec (instance namespace) to run under.
            args_: Alternative way to supply positional call arguments.
            kwargs_: Alternative way to supply keyword call arguments.

        Returns:
            The function's payload, or the full ``Result`` when ``get_results`` is True.
        """

        # if self.debug:
        #     self.logger.info(f'Called from: {getouterframes(currentframe(), 2)}')

        if kwargs_ is not None and not kwargs:
            kwargs = kwargs_
        if args_ is not None and not args:
            args = args_

        # Legacy form: a single dotted string "Module.function" is split into
        # its module and function parts.
        if isinstance(mod_function_name, str) and backwords_compability_variabel_string_holder is None:
            backwords_compability_variabel_string_holder = mod_function_name.split('.')[-1]
            mod_function_name = mod_function_name.replace(f".{backwords_compability_variabel_string_holder}", "")

        if isinstance(mod_function_name, str) and isinstance(backwords_compability_variabel_string_holder, str):
            mod_function_name = (mod_function_name, backwords_compability_variabel_string_holder)

        res: Result = self.run_function(mod_function_name,
                                        tb_run_function_with_state=tb_run_function_with_state,
                                        tb_run_with_specification=tb_run_with_specification,
                                        args_=args, kwargs_=kwargs).as_result()
        if isinstance(res, ApiResult):
            res = res.as_result()

        # Background tasks attached to the result are scheduled fire-and-forget.
        if isinstance(res, Result) and res.bg_task is not None:
            self.run_bg_task(res.bg_task)

        if self.debug:
            res.log(show_data=False)

        if not get_results and isinstance(res, Result):
            return res.get()

        if get_results and not isinstance(res, Result):
            return Result.ok(data=res)

        return res

    async def a_run_any(self, mod_function_name: Enum | str | tuple,
                        backwords_compability_variabel_string_holder=None,
                        get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                        kwargs_=None,
                        *args, **kwargs):
        """Async counterpart of :meth:`run_any`: resolve and await a registered module function.

        Args:
            mod_function_name: A ``(module, function)`` tuple, an Enum member,
                or a legacy dotted string ``"Module.function"``.
            backwords_compability_variabel_string_holder: Legacy second argument
                holding the function name when ``mod_function_name`` is the
                module string.
            get_results: When True, always return a ``Result``; otherwise
                return the unwrapped payload via ``Result.get()``.
            tb_run_function_with_state: Passed through to ``a_run_function``.
            tb_run_with_specification: Spec (instance namespace) to run under.
            args_: Alternative way to supply positional call arguments.
            kwargs_: Alternative way to supply keyword call arguments.

        Returns:
            The function's payload, or the full ``Result`` when ``get_results`` is True.
        """

        # if self.debug:
        #     self.logger.info(f'Called from: {getouterframes(currentframe(), 2)}')

        if kwargs_ is not None and not kwargs:
            kwargs = kwargs_
        if args_ is not None and not args:
            args = args_

        # Legacy form: a single dotted string "Module.function" is split into
        # its module and function parts.
        if isinstance(mod_function_name, str) and backwords_compability_variabel_string_holder is None:
            backwords_compability_variabel_string_holder = mod_function_name.split('.')[-1]
            mod_function_name = mod_function_name.replace(f".{backwords_compability_variabel_string_holder}", "")

        if isinstance(mod_function_name, str) and isinstance(backwords_compability_variabel_string_holder, str):
            mod_function_name = (mod_function_name, backwords_compability_variabel_string_holder)

        res: Result = await self.a_run_function(mod_function_name,
                                                tb_run_function_with_state=tb_run_function_with_state,
                                                tb_run_with_specification=tb_run_with_specification,
                                                args_=args, kwargs_=kwargs)
        if isinstance(res, ApiResult):
            res = res.as_result()

        # Background tasks attached to the result are scheduled fire-and-forget.
        if isinstance(res, Result) and res.bg_task is not None:
            self.run_bg_task(res.bg_task)

        if self.debug:
            # Fix: only Result objects expose print()/log(); raw ("row")
            # results previously crashed debug mode with AttributeError on
            # the unguarded res.print() call.
            if isinstance(res, Result):
                res.print()
                res.log(show_data=False)
            else:
                self.logger.debug(res)

        if not get_results and isinstance(res, Result):
            return res.get()

        if get_results and not isinstance(res, Result):
            return Result.ok(data=res)

        return res


    def web_context(self):
        """Return the cached helper HTML shell, loading it from disk on first access.

        Falls back to a minimal placeholder ``<div>`` when
        ``./dist/helper.html`` cannot be read, so callers always receive
        renderable markup.

        Returns:
            str: The helper HTML content (cached in ``self._web_context``).
        """
        if self._web_context is None:
            try:
                # Fix: close the file handle — the previous bare open() leaked it.
                with open("./dist/helper.html", encoding="utf-8") as f:
                    self._web_context = f.read()
            except Exception as e:
                self.logger.error(f"Could not load web context: {e}")
                self._web_context = "<div><h1>Web Context not found</h1></div>"
        return self._web_context

    def get_mod(self, name, spec='app') -> ModuleType | MainToolType:
        """Return the live instance of module ``name`` for the given spec, loading it if needed.

        Args:
            name: Module name to look up in the function registry.
            spec: Instance specification namespace (default ``'app'``).

        Returns:
            The ``<spec>_instance`` object, or None when the module vanished
            after loading.

        Raises:
            ValueError: When the module cannot be found or loaded.
        """
        if spec != "app":
            self.print(f"Getting Module {name} spec: {spec}")
        if name not in self.functions:
            mod = self.save_load(name, spec=spec)
            if mod is False or (isinstance(mod, Result) and mod.is_error()):
                self.logger.warning(f"Could not find {name} in {list(self.functions.keys())}")
                # Fix: corrected typos in the user-facing error message.
                raise ValueError(f"Could not find {name} in {list(self.functions.keys())}; please install the module, or it is possibly broken. Use --debug for infos")
        # private = self.functions[name].get(f"{spec}_private")
        # if private is not None:
        #     if private and spec != 'app':
        #         raise ValueError("Module is private")
        if name not in self.functions:
            self.logger.warning(f"Module '{name}' is not found")
            return None
        instance = self.functions[name].get(f"{spec}_instance")
        if instance is None:
            return self.load_mod(name, spec=spec)
        # Fix: return the value already fetched instead of a redundant second lookup.
        return instance

    def print(self, text="", *args, **kwargs):
        """Print ``text`` with a cyan ``System$<id>:`` prefix; silent on 'live' instances."""
        if 'live' in self.id:
            return

        do_flush = kwargs.pop('flush', True)
        # sprint(None) is a sentinel query: truthy when prefixed output is enabled.
        if self.sprint(None):
            print(Style.CYAN(f"System${self.id}:"), end=" ", flush=do_flush)
        print(text, *args, **kwargs, flush=do_flush)

    def sprint(self, text="", *args, **kwargs):
        """Stream-print ``text`` with the ``System$<id>:`` prefix.

        Passing ``text=None`` is a sentinel that returns True without printing
        anything; 'live' instances suppress all output entirely.
        """
        if text is None:
            return True
        if 'live' in self.id:
            return
        do_flush = kwargs.pop('flush', True)
        print(Style.CYAN(f"System${self.id}:"), end=" ", flush=do_flush)
        # Non-empty plain strings without extra kwargs go through the
        # character-stream printer; everything else falls back to print().
        is_plain_string = isinstance(text, str) and kwargs == {} and text
        if is_plain_string:
            stram_print(text + ' '.join(args))
            print()
        else:
            print(text, *args, **kwargs, flush=do_flush)

    # ----------------------------------------------------------------
    # Decorators for the toolbox

    def reload_mod(self, mod_name, spec='app', is_file=True, loc="toolboxv2.mods."):
        """Hot-reload a module: drop its instance, re-execute its source, and re-register it.

        Args:
            mod_name: Name of the module to reload.
            spec: Instance specification namespace (default ``'app'``).
            is_file: True when the module is a single ``.py`` file; False for a package.
            loc: Import prefix used to locate package submodules.
        """
        self.remove_mod(mod_name, delete=True)
        if mod_name not in self.modules:
            self.logger.warning(f"Module '{mod_name}' is not found")
            return
        # Modules can opt out of source re-execution via a `reload_save` flag;
        # in that case re-execution becomes a no-op pass-through.
        if hasattr(self.modules[mod_name], 'reload_save') and self.modules[mod_name].reload_save:
            def reexecute_module_code(x):
                return x
        else:
            def reexecute_module_code(module_name):
                # Re-compile and re-exec the module's current on-disk source in
                # its existing namespace so edits take effect in place.
                if isinstance(module_name, str):
                    module = import_module(module_name)
                else:
                    module = module_name
                # Get the source code of the module
                try:
                    source = inspect.getsource(module)
                except Exception:
                    # print(f"No source for {str(module_name).split('from')[0]}: {e}")
                    return module
                # Compile the source code
                try:
                    code = compile(source, module.__file__, 'exec')
                    # Execute the code in the module's namespace
                    exec(code, module.__dict__)
                except Exception:
                    # print(f"No source for {str(module_name).split('from')[0]}: {e}")
                    pass
                return module

        if not is_file:
            mods = self.get_all_mods("./mods/" + mod_name)
            def recursive_reload(package_name):
                # Reload a package bottom-up: all submodules first, then the
                # package itself, re-executing source before each reload().
                package = import_module(package_name)

                # First, reload all submodules
                if hasattr(package, '__path__'):
                    for _finder, name, _ispkg in pkgutil.walk_packages(package.__path__, package.__name__ + "."):
                        try:
                            mod = import_module(name)
                            reexecute_module_code(mod)
                            reload(mod)
                        except Exception as e:
                            print(f"Error reloading module {name}: {e}")
                            break

                # Finally, reload the package itself
                reexecute_module_code(package)
                reload(package)

            for mod in mods:
                if mod.endswith(".txt") or mod.endswith(".yaml"):
                    continue
                try:
                    recursive_reload(loc + mod_name + '.' + mod)
                    self.print(f"Reloaded {mod_name}.{mod}")
                except ImportError:
                    self.print(f"Could not load {mod_name}.{mod}")
        reexecute_module_code(self.modules[mod_name])
        # Reset lifecycle hook registries so re-registration does not duplicate entries.
        if mod_name in self.functions:
            if "on_exit" in self.functions[mod_name]:
                self.functions[mod_name]["on_exit"] = []
            if "on_start" in self.functions[mod_name]:
                self.functions[mod_name]["on_start"] = []
        self.inplace_load_instance(mod_name, spec=spec, mfo=reload(self.modules[mod_name]) if mod_name in self.modules else None)

    def watch_mod(self, mod_name, spec='app', loc="toolboxv2.mods.", use_thread=True, path_name=None, on_reload=None):
        """Watch a module's source on disk and hot-reload it whenever it changes.

        Args:
            mod_name: Module to reload on change.
            spec: Instance specification namespace forwarded to ``reload_mod``.
            loc: Import prefix forwarded to ``reload_mod``.
            use_thread: Run the watch loop in a daemon thread (True) or block (False).
            path_name: On-disk name under ``mods/`` when it differs from ``mod_name``.
            on_reload: Optional callback invoked after each successful reload.
        """
        if path_name is None:
            path_name = mod_name
        is_file = os.path.isfile(self.start_dir + '/mods/' + path_name + '.py')
        import watchfiles

        def watch_loop():
            watch_target = f'mods/{path_name}' + ('.py' if is_file else '')
            self.logger.info(f'Watching Path: {watch_target}')
            try:
                for change_set in watchfiles.watch(watch_target):
                    if not change_set:
                        continue
                    self.reload_mod(mod_name, spec, is_file, loc)
                    if on_reload:
                        on_reload()
            except FileNotFoundError:
                self.logger.warning(f"Path {watch_target} not found")

        if use_thread:
            threading.Thread(target=watch_loop, daemon=True).start()
        else:
            watch_loop()

    def _register_function(self, module_name, func_name, data):
        """Store ``data`` under ``self.functions[module_name][func_name]``, warning on override."""
        module_registry = self.functions.setdefault(module_name, {})
        if func_name in module_registry:
            # Carriage-return keeps repeated override notices on one console line.
            self.print(f"Overriding function {func_name} from {module_name}", end="\r")
        module_registry[func_name] = data

    def _create_decorator(self, type_: str,
                          name: str = "",
                          mod_name: str = "",
                          level: int = -1,
                          restrict_in_virtual_mode: bool = False,
                          api: bool = False,
                          helper: str = "",
                          version: str | None = None,
                          initial: bool=False,
                          exit_f: bool=False,
                          test: bool=True,
                          samples:list[dict[str, Any]] | None=None,
                          state:bool | None=None,
                          pre_compute:Callable | None=None,
                          post_compute:Callable[[], Result] | None=None,
                          api_methods:list[str] | None=None,
                          memory_cache: bool=False,
                          file_cache: bool=False,
                          request_as_kwarg: bool=False,
                          row: bool=False,
                          memory_cache_max_size:int=100,
                          memory_cache_ttl:int=300,
                          websocket_handler: str | None = None,
                          ):
        """Build the registration decorator used by :meth:`tb`.

        The returned decorator wraps the target function — adding caching,
        pre/post hooks, and Result/ApiResult normalization when requested —
        and records its metadata in ``self.functions``, or (for
        ``websocket_handler`` functions) its event handlers in
        ``self.websocket_handlers``.
        """

        if isinstance(type_, Enum):
            type_ = type_.value

        # Memory and file caches are mutually exclusive.
        if memory_cache and file_cache:
            raise ValueError("Don't use both cash at the same time for the same fuction")

        use_cache = memory_cache or file_cache
        cache = {}
        if file_cache:
            cache = FileCache(folder=self.data_dir + f'\\cache\\{mod_name}\\',
                              filename=self.data_dir + f'\\cache\\{mod_name}\\{name}cache.db')
        if memory_cache:
            cache = MemoryCache(maxsize=memory_cache_max_size, ttl=memory_cache_ttl)

        version = self.version if version is None else self.version + ':' + version

        def a_additional_process(func):
            # Async wrapper factory: pre/post hooks, Result normalization, caching.

            async def executor(*args, **kwargs):

                if pre_compute is not None:
                    args, kwargs = await pre_compute(*args, **kwargs)
                if asyncio.iscoroutinefunction(func):
                    result = await func(*args, **kwargs)
                else:
                    result = func(*args, **kwargs)
                if post_compute is not None:
                    result = await post_compute(result)
                if row:
                    return result
                if not isinstance(result, Result):
                    result = Result.ok(data=result)
                if result.origin is None:
                    result.set_origin((mod_name if mod_name else func.__module__.split('.')[-1]
                                       , name if name else func.__name__
                                       , type_))
                if result.result.data_to == ToolBoxInterfaces.native.name:
                    result.result.data_to = ToolBoxInterfaces.remote if api else ToolBoxInterfaces.native
                # Apply the to_api_result method to the result, if available
                if api and hasattr(result, 'to_api_result'):
                    return result.to_api_result()
                return result

            @wraps(func)
            async def wrapper(*args, **kwargs):

                if not use_cache:
                    return await executor(*args, **kwargs)

                # Cache key derives from module, function name and call
                # arguments; falls back to a bytes() rendering when str() fails.
                try:
                    cache_key = (f"{mod_name if mod_name else func.__module__.split('.')[-1]}"
                                 f"-{func.__name__}-{str(args)},{str(kwargs.items())}")
                except ValueError:
                    cache_key = (f"{mod_name if mod_name else func.__module__.split('.')[-1]}"
                                 f"-{func.__name__}-{bytes(args)},{str(kwargs.items())}")

                result = cache.get(cache_key)
                if result is not None:
                    return result

                result = await executor(*args, **kwargs)

                cache.set(cache_key, result)

                return result

            return wrapper

        def additional_process(func):
            # Sync wrapper factory: mirrors a_additional_process without awaiting.

            def executor(*args, **kwargs):

                if pre_compute is not None:
                    args, kwargs = pre_compute(*args, **kwargs)
                # NOTE(review): both branches are identical — a coroutine
                # function is NOT awaited on this sync path; confirm intended.
                if asyncio.iscoroutinefunction(func):
                    result = func(*args, **kwargs)
                else:
                    result = func(*args, **kwargs)
                if post_compute is not None:
                    result = post_compute(result)
                if row:
                    return result
                if not isinstance(result, Result):
                    result = Result.ok(data=result)
                if result.origin is None:
                    result.set_origin((mod_name if mod_name else func.__module__.split('.')[-1]
                                       , name if name else func.__name__
                                       , type_))
                if result.result.data_to == ToolBoxInterfaces.native.name:
                    result.result.data_to = ToolBoxInterfaces.remote if api else ToolBoxInterfaces.native
                # Apply the to_api_result method to the result, if available
                if api and hasattr(result, 'to_api_result'):
                    return result.to_api_result()
                return result

            @wraps(func)
            def wrapper(*args, **kwargs):

                if not use_cache:
                    return executor(*args, **kwargs)

                try:
                    cache_key = (f"{mod_name if mod_name else func.__module__.split('.')[-1]}"
                                 f"-{func.__name__}-{str(args)},{str(kwargs.items())}")
                except ValueError:
                    cache_key = (f"{mod_name if mod_name else func.__module__.split('.')[-1]}"
                                 f"-{func.__name__}-{bytes(args)},{str(kwargs.items())}")

                result = cache.get(cache_key)
                if result is not None:
                    return result

                result = executor(*args, **kwargs)

                cache.set(cache_key, result)

                return result

            return wrapper

        def decorator(func):
            # The actual decorator: inspects the function, wraps it if needed,
            # and records its registry entry (or websocket handlers).
            sig = signature(func)
            params = list(sig.parameters)
            module_name = mod_name if mod_name else func.__module__.split('.')[-1]
            func_name = name if name else func.__name__
            # Normalize legacy lifecycle hook names.
            if func_name == 'on_start':
                func_name = 'on_startup'
            if func_name == 'on_exit':
                func_name = 'on_close'
            if api or pre_compute is not None or post_compute is not None or memory_cache or file_cache:
                if asyncio.iscoroutinefunction(func):
                    func = a_additional_process(func)
                else:
                    func = additional_process(func)
            if api and str(sig.return_annotation) == 'Result':
                raise ValueError(f"Fuction {module_name}.{func_name} registered as "
                                 f"Api fuction but uses {str(sig.return_annotation)}\n"
                                 f"Please change the sig from ..)-> Result to ..)-> ApiResult")
            # Registry entry consumed by the function runners and the API layer.
            data = {
                "type": type_,
                "module_name": module_name,
                "func_name": func_name,
                "level": level,
                "restrict_in_virtual_mode": restrict_in_virtual_mode,
                "func": func,
                "api": api,
                "helper": helper,
                "version": version,
                "initial": initial,
                "exit_f": exit_f,
                "api_methods": api_methods if api_methods is not None else ["AUTO"],
                "__module__": func.__module__,
                "signature": sig,
                "params": params,
                "row": row,
                "state": (
                    False if len(params) == 0 else params[0] in ['self', 'state', 'app']) if state is None else state,
                "do_test": test,
                "samples": samples,
                "request_as_kwarg": request_as_kwarg,

            }

            if websocket_handler:
                # The decorated function is expected to return a dict of handlers
                try:
                    handler_config = func(self)  # call the function to obtain the handler configuration
                    if not isinstance(handler_config, dict):
                        raise TypeError(
                            f"WebSocket handler function '{func.__name__}' must return a dictionary of handlers.")

                    # Handler identifier, e.g. "ChatModule/room_chat"
                    handler_id = f"{module_name}/{websocket_handler}"
                    self.websocket_handlers[handler_id] = {}

                    for event_name, handler_func in handler_config.items():
                        if event_name in ["on_connect", "on_message", "on_disconnect"] and callable(handler_func):
                            self.websocket_handlers[handler_id][event_name] = handler_func
                        else:
                            self.logger.warning(f"Invalid WebSocket handler event '{event_name}' in '{handler_id}'.")

                    self.logger.info(f"Registered WebSocket handlers for '{handler_id}'.")

                except Exception as e:
                    self.logger.error(f"Failed to register WebSocket handlers for '{func.__name__}': {e}",
                                      exc_info=True)
            else:
                self._register_function(module_name, func_name, data)

            if exit_f:
                if "on_exit" not in self.functions[module_name]:
                    self.functions[module_name]["on_exit"] = []
                self.functions[module_name]["on_exit"].append(func_name)
            if initial:
                if "on_start" not in self.functions[module_name]:
                    self.functions[module_name]["on_start"] = []
                self.functions[module_name]["on_start"].append(func_name)

            return func

        # Marker so tooling can detect toolbox-created decorators.
        decorator.tb_init = True

        return decorator

    def tb(self, name=None,
           mod_name: str = "",
           helper: str = "",
           version: str | None = None,
           test: bool = True,
           restrict_in_virtual_mode: bool = False,
           api: bool = False,
           initial: bool = False,
           exit_f: bool = False,
           test_only: bool = False,
           memory_cache: bool = False,
           file_cache: bool = False,
           request_as_kwarg: bool = False,
           row: bool = False,
           state: bool | None = None,
           level: int = -1,
           memory_cache_max_size: int = 100,
           memory_cache_ttl: int = 300,
           samples: list | dict | None = None,
           interface: ToolBoxInterfaces | str | None = None,
           pre_compute=None,
           post_compute=None,
           api_methods=None,
           websocket_handler: str | None = None,
           ):
        """
    A decorator for registering and configuring functions within a module.

    This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

    Args:
        name (str, optional): The name to register the function under. Defaults to the function's own name.
        mod_name (str, optional): The name of the module the function belongs to.
        helper (str, optional): A helper string providing additional information about the function.
        version (str or None, optional): The version of the function or module.
        test (bool, optional): Flag to indicate if the function is for testing purposes.
        restrict_in_virtual_mode (bool, optional): Flag to restrict the function in virtual mode.
        api (bool, optional): Flag to indicate if the function is part of an API.
        initial (bool, optional): Flag to indicate if the function should be executed at initialization.
        exit_f (bool, optional): Flag to indicate if the function should be executed at exit.
        test_only (bool, optional): Flag to indicate if the function should only be used for testing.
        memory_cache (bool, optional): Flag to enable memory caching for the function.
        request_as_kwarg (bool, optional): Flag to pass the request object as a kwarg when the function is called from the API.
        file_cache (bool, optional): Flag to enable file caching for the function.
        row (bool, optional): Whether to return the raw result instead of auto-wrapping it in a Result; default False.
        state (bool or None, optional): Flag to indicate if the function maintains state.
        level (int, optional): The level of the function, used for prioritization or categorization.
        memory_cache_max_size (int, optional): Maximum size of the memory cache.
        memory_cache_ttl (int, optional): Time-to-live for the memory cache entries.
        samples (list or dict or None, optional): Samples or examples of function usage.
        interface (str, optional): The interface type for the function.
        pre_compute (callable, optional): A function to be called before the main function.
        post_compute (callable, optional): A function to be called after the main function.
        api_methods (list[str], optional): default ["AUTO"] (GET if no params, POST if params), GET, POST, PUT or DELETE.
        websocket_handler (str, optional): The name of the websocket handler to use.

    Returns:
        function: The decorated function with additional processing and registration capabilities.
    """
        if interface is None:
            interface = "tb"
        # Test-only functions become a pass-through lambda outside test instances.
        if test_only and 'test' not in self.id:
            return lambda *args, **kwargs: args
        return self._create_decorator(interface,
                                      name,
                                      mod_name,
                                      level=level,
                                      restrict_in_virtual_mode=restrict_in_virtual_mode,
                                      helper=helper,
                                      api=api,
                                      version=version,
                                      initial=initial,
                                      exit_f=exit_f,
                                      test=test,
                                      samples=samples,
                                      state=state,
                                      pre_compute=pre_compute,
                                      post_compute=post_compute,
                                      memory_cache=memory_cache,
                                      file_cache=file_cache,
                                      request_as_kwarg=request_as_kwarg,
                                      row=row,
                                      api_methods=api_methods,
                                      memory_cache_max_size=memory_cache_max_size,
                                      memory_cache_ttl=memory_cache_ttl,
                                      websocket_handler=websocket_handler,
                                      )

    def save_autocompletion_dict(self):
        """Build a nested mapping of module -> function -> {param: None} for
        every registered function and persist it via the config file handler
        under the key "auto~~~~~~" (stored as str(dict))."""
        completion: dict = {}
        for mod_name in self.functions:
            entries = {}
            for func_name, func_data in self.functions[mod_name].items():
                # Registry entries that are not metadata dicts carry no params.
                if not isinstance(func_data, dict):
                    continue
                params = {param: None for param in func_data.get("params", [])}
                entries[func_name] = params or None
            completion[mod_name] = entries or None
        self.config_fh.add_to_save_file_handler("auto~~~~~~", str(completion))

    def get_autocompletion_dict(self):
        """Return the autocompletion mapping stored by save_autocompletion_dict.
        NOTE(review): the value was saved via str(dict), so this presumably
        returns a string representation — confirm how callers parse it."""
        return self.config_fh.get_file_handler("auto~~~~~~")

    def save_registry_as_enums(self, directory: str, filename: str):
        """Generate a Python module of Enum classes describing every registered
        function and write it to ``directory/filename``.

        Args:
            directory: Target directory; created if it does not exist.
            filename: Name of the generated python file.

        Raises:
            ValueError: If no enum content was generated (empty registry),
                to avoid clobbering the existing generated file.
        """
        # Create the target directory (race-safe, no pre-check needed).
        os.makedirs(directory, exist_ok=True)

        # Build the target file path.
        filepath = os.path.join(directory, filename)

        # Generate the enum classes as source-code strings.
        enum_classes = [f'"""Automatic generated by ToolBox v = {self.version}"""'
                        f'\nfrom enum import Enum\nfrom dataclasses import dataclass'
                        f'\n\n\n']
        for module, functions in self.functions.items():
            if module.startswith("APP_INSTANCE"):
                continue
            class_name = module
            # NOTE(review): upper()/replace('-','') may still yield names that
            # are not valid identifiers (e.g. starting with a digit) — confirm
            # registered function names are identifier-safe.
            enum_members = "\n    ".join(
                [
                    f"{func_name.upper().replace('-', '')}"
                    f" = '{func_name}' "
                    f"# Input: ({function_data['params'] if isinstance(function_data, dict) else ''}),"
                    f" Output: {function_data['signature'].return_annotation if isinstance(function_data, dict) else 'None'}"
                    for func_name, function_data in functions.items()])
            enum_class = (f'@dataclass\nclass {class_name.upper().replace(".", "_").replace("-", "")}(Enum):'
                          f"\n    NAME = '{class_name}'\n    {enum_members}")
            enum_classes.append(enum_class)

        # Write the enums to the file.
        data = "\n\n\n".join(enum_classes)
        if len(data) < 12:
            raise ValueError(
                "Invalid enums: generated content is empty. Delete "
                "utils/system/all_functions_enums.py manually or register more functions.")
        # Explicit encoding so module/function names survive on any platform.
        with open(filepath, 'w', encoding='utf-8') as file:
            file.write(data)

        print(Style.Bold(Style.BLUE(f"Enums gespeichert in {filepath}")))


    # WS logic

    def _set_rust_ws_bridge(self, bridge_object: Any):
        """
        Called from Rust to install the communication bridge object.
        It must NOT be invoked manually from Python.
        """
        self.print(f"Rust WebSocket bridge has been set for instance {self.id}.")
        self._rust_ws_bridge = bridge_object

    async def ws_send(self, conn_id: str, payload: dict):
        """
        Asynchronously send a message to a single WebSocket connection.

        Args:
            conn_id: Unique id of the target connection.
            payload: Dictionary that is serialized to JSON and sent.
        """
        bridge = self._rust_ws_bridge
        if bridge is None:
            self.logger.error("Cannot send WebSocket message: Rust bridge is not initialized.")
            return

        try:
            # Delegate to the asynchronous Rust-side send and await completion.
            await bridge.send_message(conn_id, json.dumps(payload))
        except Exception as e:
            self.logger.error(f"Failed to send WebSocket message to {conn_id}: {e}", exc_info=True)

    async def ws_broadcast(self, channel_id: str, payload: dict, source_conn_id: str = "python_broadcast"):
        """
        Asynchronously send a message to every client in a channel/room.

        Args:
            channel_id: The channel to broadcast to.
            payload: Dictionary that is serialized to JSON and sent.
            source_conn_id (optional): Id of the originating connection, used to avoid echoing back.
        """
        if self._rust_ws_bridge is None:
            self.logger.error("Cannot broadcast WebSocket message: Rust bridge is not initialized.")
            return

        try:
            # Invoke the asynchronous Rust-side broadcast.
            await self._rust_ws_bridge.broadcast_message(channel_id, json.dumps(payload), source_conn_id)
        except Exception as e:
            self.logger.error(f"Failed to broadcast WebSocket message to channel {channel_id}: {e}", exc_info=True)
disconnect(*args, **kwargs) staticmethod

proxi attr

Source code in toolboxv2/utils/toolbox.py
240
241
242
@staticmethod
def disconnect(*args, **kwargs):
    """Proxy attribute placeholder — no-op; presumably replaced with a real
    implementation at runtime (TODO confirm in toolbox.py)."""
exit_main(*args, **kwargs) staticmethod

proxi attr

Source code in toolboxv2/utils/toolbox.py
228
229
230
@staticmethod
def exit_main(*args, **kwargs):
    """Proxy attribute placeholder — no-op; presumably replaced with a real
    implementation at runtime (TODO confirm in toolbox.py)."""
get_function(name, **kwargs)

Kwargs for _get_function metadata:: return the registered function dictionary stateless: (function_data, None), 0 stateful: (function_data, higher_order_function), 0 state::boolean specification::str default app

Source code in toolboxv2/utils/toolbox.py
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
def get_function(self, name: Enum or tuple, **kwargs):
    """
    Resolve a registered function by name.

    Kwargs forwarded to _get_function:
        metadata:: return the registered function dictionary
            stateless: (function_data, None), 0
            stateful: (function_data, higher_order_function), 0
        state::boolean
        specification::str default app
    """
    if isinstance(name, tuple):
        # A tuple is treated as an already-split (module, function) pair.
        return self._get_function(None, as_str=name, **kwargs)
    return self._get_function(name, **kwargs)
hide_console(*args, **kwargs) staticmethod

proxi attr

Source code in toolboxv2/utils/toolbox.py
232
233
234
@staticmethod
def hide_console(*args, **kwargs):
    """Proxy attribute placeholder — no-op; presumably replaced with a real
    implementation at runtime (TODO confirm in toolbox.py)."""
init_mod(mod_name, spec='app')

Initializes a module in a thread-safe manner by submitting the asynchronous initialization to the running event loop.

Source code in toolboxv2/utils/toolbox.py
621
622
623
624
625
626
627
628
def init_mod(self, mod_name, spec='app'):
    """
    Initializes a module in a thread-safe manner by submitting the
    asynchronous initialization to the running event loop.
    """
    # Only the base module name matters; strip any dotted suffix.
    base_name = mod_name.partition('.')[0]
    self.run_bg_task(self.a_init_mod, base_name, spec)
run(*args, request=None, running_function_coro=None, **kwargs)

Run a function with support for SSE streaming in both threaded and non-threaded contexts.

Source code in toolboxv2/utils/toolbox.py
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
def run(self, *args, request=None, running_function_coro=None, **kwargs):
    """
    Run a function with support for SSE streaming in both
    threaded and non-threaded contexts.

    Args:
        *args: When no coroutine is supplied, args[0] is a (module_name,
            function_name) pair identifying the registered function.
        request: Optional raw request dict; converted to RequestData when the
            target function was registered with request_as_kwarg.
        running_function_coro: Optional pre-built coroutine to execute instead
            of resolving one via a_run_any.
        **kwargs: Forwarded to the target function.

    Returns:
        The function result. Result instances are serialized via
        to_api_result().model_dump(mode='json') unless they carry a stream.
    """
    if running_function_coro is None:
        mn, fn = args[0]
        if self.functions.get(mn, {}).get(fn, {}).get('request_as_kwarg', False):
            kwargs["request"] = RequestData.from_dict(request)
            # 'data'/'form_data' are folded into the request object unless the
            # target function declares them as real parameters.
            if 'data' in kwargs and 'data' not in self.functions.get(mn, {}).get(fn, {}).get('params', []):
                kwargs["request"].data = kwargs["request"].body = kwargs['data']
                del kwargs['data']
            if 'form_data' in kwargs and 'form_data' not in self.functions.get(mn, {}).get(fn, {}).get('params',
                                                                                                       []):
                kwargs["request"].form_data = kwargs["request"].body = kwargs['form_data']
                del kwargs['form_data']

    # Create the coroutine
    coro = running_function_coro or self.a_run_any(*args, **kwargs)

    # Get or create an event loop
    try:
        loop = asyncio.get_event_loop()
        is_running = loop.is_running()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        is_running = False

    # If the loop is already running, run in a separate thread
    if is_running:
        # Create thread pool executor as needed (shared, class-level)
        if not hasattr(self.__class__, '_executor'):
            self.__class__._executor = ThreadPoolExecutor(max_workers=4)

        def run_in_new_thread():
            # Set up a new loop in this thread
            new_loop = asyncio.new_event_loop()
            asyncio.set_event_loop(new_loop)

            try:
                # Run the coroutine
                return new_loop.run_until_complete(coro)
            finally:
                new_loop.close()

        # Run in thread and get result (blocks the caller until completion)
        thread_result = self.__class__._executor.submit(run_in_new_thread).result()

        # Handle streaming results from thread
        if isinstance(thread_result, dict) and thread_result.get("is_stream"):
            # Create a new SSE stream in the main thread
            async def stream_from_function():
                # Re-run the function with direct async access.
                # NOTE(review): this invokes the target a second time to get a
                # live generator — confirm target functions are idempotent.
                stream_result = await self.a_run_any(*args, **kwargs)

                if (isinstance(stream_result, Result) and
                    getattr(stream_result.result, 'data_type', None) == "stream"):
                    # Get and forward data from the original generator
                    original_gen = stream_result.result.data.get("generator")
                    if inspect.isasyncgen(original_gen):
                        async for item in original_gen:
                            yield item

            # Return a new streaming Result
            return Result.stream(
                stream_generator=stream_from_function(),
                headers=thread_result.get("headers", {})
            )

        result = thread_result
    else:
        # Direct execution when loop is not running
        result = loop.run_until_complete(coro)

    # Process the final result
    if isinstance(result, Result):
        if 'debug' in self.id:
            result.print()
        if getattr(result.result, 'data_type', None) == "stream":
            return result
        return result.to_api_result().model_dump(mode='json')

    return result
run_bg_task(task, *args, **kwargs)

Runs a coroutine in the background without blocking the caller.

This is the primary method for "fire-and-forget" async tasks. It schedules the coroutine to run on the application's main event loop.

Parameters:

Name Type Description Default
task Callable

The coroutine function to run.

required
*args

Arguments to pass to the coroutine function.

()
**kwargs

Keyword arguments to pass to the coroutine function.

{}

Returns:

Type Description
Task | None

An asyncio.Task object representing the scheduled task, or None if

Task | None

the task could not be scheduled.

Source code in toolboxv2/utils/toolbox.py
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
def run_bg_task(self, task: Callable, *args, **kwargs) -> asyncio.Task | None:
    """
    Runs a coroutine in the background without blocking the caller.

    This is the primary method for "fire-and-forget" async tasks. It schedules
    the coroutine to run on the application's main event loop.

    Args:
        task: The coroutine function (or coroutine object) to run.
        *args: Arguments to pass to the coroutine function.
        **kwargs: Keyword arguments to pass to the coroutine function.

    Returns:
        An asyncio.Task object representing the scheduled task, or None if
        the task could not be scheduled.
    """
    if not callable(task):
        self.logger.warning("Task passed to run_bg_task is not callable!")
        return None

    if not asyncio.iscoroutinefunction(task) and not asyncio.iscoroutine(task):
        self.logger.warning(f"Task '{getattr(task, '__name__', 'unknown')}' is not a coroutine. "
                            f"Use run_bg_task_advanced for synchronous functions.")
        # Fallback to advanced runner for convenience
        self.run_bg_task_advanced(task, *args, **kwargs)
        return None

    try:
        loop = self.loop_gard()
        if not loop.is_running():
            # If the main loop isn't running, we can't create a task on it.
            # This scenario is handled by run_bg_task_advanced.
            self.logger.info("Main event loop not running. Delegating to advanced background runner.")
            return self.run_bg_task_advanced(task, *args, **kwargs)

        # Create the coroutine if it's a function.
        # NOTE(review): when `task` is already a coroutine object, *args and
        # **kwargs are silently ignored here — confirm callers never pass both.
        coro = task(*args, **kwargs) if asyncio.iscoroutinefunction(task) else task

        # Create a task on the running event loop
        bg_task = loop.create_task(coro)

        # Add a callback to log exceptions from the background task
        def _log_exception(the_task: asyncio.Task):
            if not the_task.cancelled() and the_task.exception():
                self.logger.error(f"Exception in background task '{the_task.get_name()}':",
                                  exc_info=the_task.exception())

        bg_task.add_done_callback(_log_exception)
        self.bg_tasks.append(bg_task)
        return bg_task

    except Exception as e:
        self.logger.error(f"Failed to schedule background task: {e}", exc_info=True)
        return None
run_bg_task_advanced(task, *args, **kwargs)

Runs a task in a separate, dedicated background thread with its own event loop.

This is ideal for: 1. Running an async task from a synchronous context. 2. Launching a long-running, independent operation that should not interfere with the main application's event loop.

Parameters:

Name Type Description Default
task Callable

The function to run (can be sync or async).

required
*args

Arguments for the task.

()
**kwargs

Keyword arguments for the task.

{}

Returns:

Type Description
Thread

The threading.Thread object managing the background execution.

Source code in toolboxv2/utils/toolbox.py
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
def run_bg_task_advanced(self, task: Callable, *args, **kwargs) -> threading.Thread:
    """
    Runs a task in a separate, dedicated background thread with its own event loop.

    This is ideal for:
    1. Running an async task from a synchronous context.
    2. Launching a long-running, independent operation that should not
       interfere with the main application's event loop.

    Args:
        task: The function to run (can be sync or async).
        *args: Arguments for the task.
        **kwargs: Keyword arguments for the task.

    Returns:
        The threading.Thread object managing the background execution,
        or None if `task` is not callable.
    """
    if not callable(task):
        self.logger.warning("Task for run_bg_task_advanced is not callable!")
        return None

    def thread_target():
        # Each thread gets its own event loop.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

        try:
            # Prepare the coroutine we need to run
            if asyncio.iscoroutinefunction(task):
                coro = task(*args, **kwargs)
            elif asyncio.iscoroutine(task):
                # It's already a coroutine object
                coro = task
            else:
                # It's a synchronous function, run it in an executor
                # to avoid blocking the new event loop.
                coro = loop.run_in_executor(None, lambda: task(*args, **kwargs))

            # Run the coroutine to completion
            result = loop.run_until_complete(coro)
            self.logger.debug(f"Advanced background task '{getattr(task, '__name__', 'unknown')}' completed.")
            if result is not None:
                self.logger.debug(f"Task result: {str(result)[:100]}")

        except Exception as e:
            self.logger.error(f"Error in advanced background task '{getattr(task, '__name__', 'unknown')}':",
                              exc_info=e)
        finally:
            # Cleanly shut down the event loop in this thread: cancel any
            # leftover tasks before closing so they don't leak warnings.
            try:
                all_tasks = asyncio.all_tasks(loop=loop)
                if all_tasks:
                    for t in all_tasks:
                        t.cancel()
                    loop.run_until_complete(asyncio.gather(*all_tasks, return_exceptions=True))
            finally:
                loop.close()
                asyncio.set_event_loop(None)

    # Create, start, and return the thread.
    # It's a daemon thread so it won't prevent the main app from exiting.
    # The thread is also tracked in self.bg_tasks for wait_for_bg_tasks.
    t = threading.Thread(target=thread_target, daemon=True, name=f"BGTask-{getattr(task, '__name__', 'unknown')}")
    self.bg_tasks.append(t)
    t.start()
    return t
show_console(*args, **kwargs) staticmethod

proxi attr

Source code in toolboxv2/utils/toolbox.py
236
237
238
@staticmethod
def show_console(*args, **kwargs):
    """Proxy attribute placeholder — no-op; presumably replaced with a real
    implementation at runtime (TODO confirm in toolbox.py)."""
tb(name=None, mod_name='', helper='', version=None, test=True, restrict_in_virtual_mode=False, api=False, initial=False, exit_f=False, test_only=False, memory_cache=False, file_cache=False, request_as_kwarg=False, row=False, state=None, level=-1, memory_cache_max_size=100, memory_cache_ttl=300, samples=None, interface=None, pre_compute=None, post_compute=None, api_methods=None, websocket_handler=None)

A decorator for registering and configuring functions within a module.

This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

Parameters:

Name Type Description Default
name str

The name to register the function under. Defaults to the function's own name.

None
mod_name str

The name of the module the function belongs to.

''
helper str

A helper string providing additional information about the function.

''
version str or None

The version of the function or module.

None
test bool

Flag to indicate if the function is for testing purposes.

True
restrict_in_virtual_mode bool

Flag to restrict the function in virtual mode.

False
api bool

Flag to indicate if the function is part of an API.

False
initial bool

Flag to indicate if the function should be executed at initialization.

False
exit_f bool

Flag to indicate if the function should be executed at exit.

False
test_only bool

Flag to indicate if the function should only be used for testing.

False
memory_cache bool

Flag to enable memory caching for the function.

False
request_as_kwarg bool

Flag to pass the request object as a keyword argument when the function is called from the API.

False
file_cache bool

Flag to enable file caching for the function.

False
row bool

Whether to return the raw result instead of auto-wrapping it in a Result type. Defaults to False (the result is wrapped).

False
state bool or None

Flag to indicate if the function maintains state.

None
level int

The level of the function, used for prioritization or categorization.

-1
memory_cache_max_size int

Maximum size of the memory cache.

100
memory_cache_ttl int

Time-to-live for the memory cache entries.

300
samples list or dict or None

Samples or examples of function usage.

None
interface str

The interface type for the function.

None
pre_compute callable

A function to be called before the main function.

None
post_compute callable

A function to be called after the main function.

None
api_methods list[str]

default ["AUTO"] (GET if not params, POST if params) , GET, POST, PUT or DELETE.

None
websocket_handler str

The name of the websocket handler to use.

None

Returns:

Name Type Description
function

The decorated function with additional processing and registration capabilities.

Source code in toolboxv2/utils/toolbox.py
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
def tb(self, name=None,
       mod_name: str = "",
       helper: str = "",
       version: str | None = None,
       test: bool = True,
       restrict_in_virtual_mode: bool = False,
       api: bool = False,
       initial: bool = False,
       exit_f: bool = False,
       test_only: bool = False,
       memory_cache: bool = False,
       file_cache: bool = False,
       request_as_kwarg: bool = False,
       row: bool = False,
       state: bool | None = None,
       level: int = -1,
       memory_cache_max_size: int = 100,
       memory_cache_ttl: int = 300,
       samples: list or dict or None = None,
       interface: ToolBoxInterfaces or None or str = None,
       pre_compute=None,
       post_compute=None,
       api_methods=None,
       websocket_handler: str | None = None,
       ):
    """
A decorator for registering and configuring functions within a module.

This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

Args:
    name (str, optional): The name to register the function under. Defaults to the function's own name.
    mod_name (str, optional): The name of the module the function belongs to.
    helper (str, optional): A helper string providing additional information about the function.
    version (str or None, optional): The version of the function or module.
    test (bool, optional): Flag to indicate if the function is for testing purposes.
    restrict_in_virtual_mode (bool, optional): Flag to restrict the function in virtual mode.
    api (bool, optional): Flag to indicate if the function is part of an API.
    initial (bool, optional): Flag to indicate if the function should be executed at initialization.
    exit_f (bool, optional): Flag to indicate if the function should be executed at exit.
    test_only (bool, optional): Flag to indicate if the function should only be used for testing.
    memory_cache (bool, optional): Flag to enable memory caching for the function.
    request_as_kwarg (bool, optional): Flag to pass the request object as a keyword argument when the function is called from the API.
    file_cache (bool, optional): Flag to enable file caching for the function.
    row (bool, optional): Whether to return the raw result instead of auto-wrapping it in a Result type. Defaults to False (wrapped).
    state (bool or None, optional): Flag to indicate if the function maintains state.
    level (int, optional): The level of the function, used for prioritization or categorization.
    memory_cache_max_size (int, optional): Maximum size of the memory cache.
    memory_cache_ttl (int, optional): Time-to-live for the memory cache entries.
    samples (list or dict or None, optional): Samples or examples of function usage.
    interface (str, optional): The interface type for the function.
    pre_compute (callable, optional): A function to be called before the main function.
    post_compute (callable, optional): A function to be called after the main function.
    api_methods (list[str], optional): default ["AUTO"] (GET if no params, POST if params); GET, POST, PUT or DELETE.
    websocket_handler (str, optional): The name of the websocket handler to use.

Returns:
    function: The decorated function with additional processing and registration capabilities.
"""
    if interface is None:
        interface = "tb"
    if test_only and 'test' not in self.id:
        return lambda *args, **kwargs: args
    return self._create_decorator(interface,
                                  name,
                                  mod_name,
                                  level=level,
                                  restrict_in_virtual_mode=restrict_in_virtual_mode,
                                  helper=helper,
                                  api=api,
                                  version=version,
                                  initial=initial,
                                  exit_f=exit_f,
                                  test=test,
                                  samples=samples,
                                  state=state,
                                  pre_compute=pre_compute,
                                  post_compute=post_compute,
                                  memory_cache=memory_cache,
                                  file_cache=file_cache,
                                  request_as_kwarg=request_as_kwarg,
                                  row=row,
                                  api_methods=api_methods,
                                  memory_cache_max_size=memory_cache_max_size,
                                  memory_cache_ttl=memory_cache_ttl,
                                  websocket_handler=websocket_handler,
                                  )
wait_for_bg_tasks(timeout=None)

Wait for all background tasks to complete.

Parameters:

Name Type Description Default
timeout

Maximum time to wait (in seconds) for all tasks to complete. None means wait indefinitely.

None

Returns:

Name Type Description
bool

True if all tasks completed, False if timeout occurred

Source code in toolboxv2/utils/toolbox.py
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
def wait_for_bg_tasks(self, timeout=None):
    """
    Wait for all background tasks to complete.

    Fixes two defects of the previous version: the timeout was applied to
    every join() individually (total wait could reach timeout * n_tasks),
    and bg_tasks may also contain asyncio.Task objects (appended by
    run_bg_task) which have no is_alive()/join() and raised AttributeError.

    Args:
        timeout: Maximum total time to wait (in seconds) for all tasks to
                 complete. None means wait indefinitely.

    Returns:
        bool: True if all tasks completed, False if timeout occurred
    """
    import time  # local import keeps the fix self-contained

    # Only thread-like entries can be joined; skip asyncio.Task objects.
    active_tasks = [t for t in self.bg_tasks
                    if hasattr(t, "is_alive") and t.is_alive()]

    # Track one shared deadline so `timeout` bounds the TOTAL wait.
    deadline = None if timeout is None else time.monotonic() + timeout

    for task in active_tasks:
        remaining = None if deadline is None else max(0.0, deadline - time.monotonic())
        task.join(timeout=remaining)
        if task.is_alive():
            return False

    return True
ws_broadcast(channel_id, payload, source_conn_id='python_broadcast') async

Sendet eine Nachricht asynchron an alle Clients in einem Kanal/Raum.

Parameters:

Name Type Description Default
channel_id str

Der Kanal, an den gesendet werden soll.

required
payload dict

Ein Dictionary, das als JSON gesendet wird.

required
source_conn_id optional

Die ID der ursprünglichen Verbindung, um Echos zu vermeiden.

'python_broadcast'
Source code in toolboxv2/utils/toolbox.py
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
async def ws_broadcast(self, channel_id: str, payload: dict, source_conn_id: str = "python_broadcast"):
    """
    Asynchronously send a message to every client in a channel/room.

    Args:
        channel_id: The channel to broadcast to.
        payload: Dictionary that is serialized to JSON and sent.
        source_conn_id (optional): Id of the originating connection, used to avoid echoing back.
    """
    if self._rust_ws_bridge is None:
        self.logger.error("Cannot broadcast WebSocket message: Rust bridge is not initialized.")
        return

    try:
        # Invoke the asynchronous Rust-side broadcast.
        await self._rust_ws_bridge.broadcast_message(channel_id, json.dumps(payload), source_conn_id)
    except Exception as e:
        self.logger.error(f"Failed to broadcast WebSocket message to channel {channel_id}: {e}", exc_info=True)
ws_send(conn_id, payload) async

Sendet eine Nachricht asynchron an eine einzelne WebSocket-Verbindung.

Parameters:

Name Type Description Default
conn_id str

Die eindeutige ID der Zielverbindung.

required
payload dict

Ein Dictionary, das als JSON gesendet wird.

required
Source code in toolboxv2/utils/toolbox.py
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
async def ws_send(self, conn_id: str, payload: dict):
    """
    Asynchronously send a message to a single WebSocket connection.

    Args:
        conn_id: Unique id of the target connection.
        payload: Dictionary that is serialized to JSON and sent.
    """
    if self._rust_ws_bridge is None:
        self.logger.error("Cannot send WebSocket message: Rust bridge is not initialized.")
        return

    try:
        # Await the asynchronous Rust-side send call.
        await self._rust_ws_bridge.send_message(conn_id, json.dumps(payload))
    except Exception as e:
        self.logger.error(f"Failed to send WebSocket message to {conn_id}: {e}", exc_info=True)

Code

Source code in toolboxv2/utils/security/cryp.py
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
class Code:
    """Collection of cryptographic helpers: symmetric (Fernet) and asymmetric
    (RSA-OAEP / RSA-PSS / ECDSA) operations plus convenience wrappers bound to
    the device key (``DEVICE_KEY``).
    """

    @staticmethod
    def DK():
        """Return the DEVICE_KEY callable itself (not the key value)."""
        return DEVICE_KEY

    def decode_code(self, encrypted_data, key=None):
        """Decrypt `encrypted_data` symmetrically; defaults to the device key.

        Non-string input is coerced with ``str()`` before decryption.
        """
        if not isinstance(encrypted_data, str):
            encrypted_data = str(encrypted_data)

        if key is None:
            key = DEVICE_KEY()

        return self.decrypt_symmetric(encrypted_data, key)

    def encode_code(self, data, key=None):
        """Encrypt `data` symmetrically; defaults to the device key.

        Non-string input is coerced with ``str()`` before encryption.
        """
        if not isinstance(data, str):
            data = str(data)

        if key is None:
            key = DEVICE_KEY()

        return self.encrypt_symmetric(data, key)

    @staticmethod
    def generate_seed() -> int:
        """Return a random integer in [2**32 - 1, 2**64 - 1].

        NOTE(review): uses the ``random`` module, which is NOT
        cryptographically secure; use ``secrets`` for anything
        security-sensitive.

        Returns:
            int: A random number.
        """
        return random.randint(2 ** 32 - 1, 2 ** 64 - 1)

    @staticmethod
    def one_way_hash(text: str, salt: str = '', pepper: str = '') -> str:
        """Return the SHA-256 hex digest of ``salt + text + pepper``.

        Args:
            text (str): The text to hash.
            salt (str): Value prepended to the text.
            pepper (str): Value appended to the text.

        Returns:
            str: The hex-encoded digest.
        """
        return hashlib.sha256((salt + text + pepper).encode()).hexdigest()

    @staticmethod
    def generate_symmetric_key(as_str=True) -> str or bytes:
        """Generate a key for symmetric (Fernet) encryption.

        Args:
            as_str (bool): When True (default) return the key decoded to
                ``str``, otherwise return the raw ``bytes``.

        Returns:
            str or bytes: The generated key.
        """
        key = Fernet.generate_key()
        if as_str:
            key = key.decode()
        return key

    @staticmethod
    def encrypt_symmetric(text: str or bytes, key: str) -> str:
        """Encrypt `text` with a Fernet key.

        Args:
            text (str or bytes): Plaintext; ``str`` is UTF-8 encoded first.
            key (str): The symmetric (Fernet) key.

        Returns:
            str: The encrypted token, or the literal string
            ``"Error encrypt"`` on failure (failure is logged).
        """
        if isinstance(text, str):
            text = text.encode()

        try:
            fernet = Fernet(key.encode())
            return fernet.encrypt(text).decode()
        except Exception as e:
            get_logger().error(f"Error encrypt_symmetric #{str(e)}#")
            return "Error encrypt"

    @staticmethod
    def decrypt_symmetric(encrypted_text: str, key: str, to_str=True, mute=False) -> str or bytes:
        """Decrypt a Fernet token.

        Args:
            encrypted_text (str): The token to decrypt.
            key (str): The symmetric key (str is encoded to bytes).
            to_str (bool): default true returns str if false returns bytes
            mute (bool): Currently unused; kept for backward compatibility
                with callers that pass it.

        Returns:
            str or bytes: The decrypted plaintext.

        Raises:
            Exception: Propagates cryptography errors (e.g. InvalidToken)
                on a bad key or token.
        """
        if isinstance(key, str):
            key = key.encode()

        fernet = Fernet(key)
        text_b = fernet.decrypt(encrypted_text)
        if not to_str:
            return text_b
        return text_b.decode()

    @staticmethod
    def generate_asymmetric_keys() -> (str, str):
        """Generate a 6144-bit RSA key pair for asymmetric encryption.

        Returns:
            (str, str): A tuple of (public key PEM, private key PEM).
        """
        private_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048 * 3,
        )
        public_key = private_key.public_key()

        # Serialize both keys to PEM strings.
        pem_private_key = private_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption()
        ).decode()

        pem_public_key = public_key.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        ).decode()

        return pem_public_key, pem_private_key

    @staticmethod
    def save_keys_to_files(public_key: str, private_key: str, directory: str = "keys") -> None:
        """Persist a key pair to separate files in `directory`.

        The private key is encrypted with the device key before writing.

        Args:
            public_key (str): Public key in PEM format.
            private_key (str): Private key in PEM format.
            directory (str): Target directory (created if missing).
        """
        os.makedirs(directory, exist_ok=True)

        device_key = DEVICE_KEY()

        # Encrypt the private key with the device key before it touches disk.
        encrypted_private_key = Code.encrypt_symmetric(private_key, device_key)

        public_key_path = os.path.join(directory, "public_key.pem")
        with open(public_key_path, "w") as f:
            f.write(public_key)

        private_key_path = os.path.join(directory, "private_key.pem")
        with open(private_key_path, "w") as f:
            f.write(encrypted_private_key)

        print("Saved keys in ", public_key_path)

    @staticmethod
    def load_keys_from_files(directory: str = "keys") -> (str, str):
        """Load a key pair previously written by `save_keys_to_files`.

        The private key is decrypted with the device key.

        Args:
            directory (str): Directory to read the key files from.

        Returns:
            (str, str): A tuple of (public key, private key), or
            ``("", "")`` when either key file is missing.
        """
        public_key_path = os.path.join(directory, "public_key.pem")
        private_key_path = os.path.join(directory, "private_key.pem")

        # Missing files are not an error: return empty strings instead.
        if not os.path.exists(public_key_path) or not os.path.exists(private_key_path):
            return "", ""

        device_key = DEVICE_KEY()

        with open(public_key_path) as f:
            public_key = f.read()

        with open(private_key_path) as f:
            encrypted_private_key = f.read()
            private_key = Code.decrypt_symmetric(encrypted_private_key, device_key)

        return public_key, private_key

    @staticmethod
    def encrypt_asymmetric(text: str, public_key_str: str) -> str:
        """Encrypt `text` with RSA-OAEP (SHA-512) using a PEM public key.

        Args:
            text (str): Plaintext to encrypt.
            public_key_str (str): Public key in PEM format.

        Returns:
            str: The ciphertext as a hex string, or ``"Invalid"`` on
            failure (failure is logged).
        """
        try:
            public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
            encrypted = public_key.encrypt(
                text.encode(),
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA512()),
                    algorithm=hashes.SHA512(),
                    label=None
                )
            )
            return encrypted.hex()
        except Exception as e:
            get_logger().error(f"Error encrypt_asymmetric {e}")
            return "Invalid"

    @staticmethod
    def decrypt_asymmetric(encrypted_text_hex: str, private_key_str: str) -> str:
        """Decrypt an RSA-OAEP (SHA-512) ciphertext with a PEM private key.

        Args:
            encrypted_text_hex (str): Ciphertext as a hex string.
            private_key_str (str): Private key in PEM format.

        Returns:
            str: The plaintext, or ``"Invalid"`` on failure (failure is
            logged).
        """
        try:
            private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
            decrypted = private_key.decrypt(
                bytes.fromhex(encrypted_text_hex),
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA512()),
                    algorithm=hashes.SHA512(),
                    label=None
                )
            )
            return decrypted.decode()

        except Exception as e:
            get_logger().error(f"Error decrypt_asymmetric {e}")
        return "Invalid"

    @staticmethod
    def verify_signature(signature: str or bytes, message: str or bytes, public_key_str: str,
                         salt_length=padding.PSS.MAX_LENGTH) -> bool:
        """Verify an RSA-PSS (SHA-512) signature.

        Returns True when the signature is valid, False on any failure.
        """
        if isinstance(signature, str):
            signature = signature.encode()
        if isinstance(message, str):
            message = message.encode()
        try:
            public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
            public_key.verify(
                signature=signature,
                data=message,
                padding=padding.PSS(
                    mgf=padding.MGF1(hashes.SHA512()),
                    salt_length=salt_length
                ),
                algorithm=hashes.SHA512()
            )
            return True
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; any verification error means "not valid".
            return False

    @staticmethod
    def verify_signature_web_algo(signature: str or bytes, message: str or bytes, public_key_str: str,
                                  algo: int = -512) -> bool:
        """Verify an ECDSA signature (SHA-512 when ``algo == -512``,
        otherwise SHA-256).

        Returns True when the signature is valid, False on any failure.
        """
        signature_algorithm = ECDSA(hashes.SHA512())
        if algo != -512:
            signature_algorithm = ECDSA(hashes.SHA256())

        if isinstance(signature, str):
            signature = signature.encode()
        if isinstance(message, str):
            message = message.encode()
        try:
            public_key = serialization.load_pem_public_key(public_key_str.encode())
            public_key.verify(
                signature=signature,
                data=message,
                signature_algorithm=signature_algorithm
            )
            return True
        except Exception:
            # Narrowed from a bare `except:`; see verify_signature.
            return False

    @staticmethod
    def create_signature(message: str, private_key_str: str, salt_length=padding.PSS.MAX_LENGTH,
                         row=False) -> str or bytes:
        """Sign `message` with RSA-PSS (SHA-512).

        Args:
            message (str): Text to sign.
            private_key_str (str): Private key in PEM format.
            salt_length: PSS salt length (defaults to maximum).
            row (bool): When True return raw signature bytes, otherwise a
                base64-encoded string.

        Returns:
            str or bytes: The signature, or ``"Invalid Key"`` on failure
            (failure is logged).
        """
        try:
            private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
            signature = private_key.sign(
                message.encode(),
                padding.PSS(
                    mgf=padding.MGF1(hashes.SHA512()),
                    salt_length=salt_length
                ),
                hashes.SHA512()
            )
            if row:
                return signature
            return base64.b64encode(signature).decode()
        except Exception as e:
            # The logger records the failure; the former `print(e)` merely
            # duplicated it on stdout and was removed.
            get_logger().error(f"Error create_signature {e}")
        return "Invalid Key"

    @staticmethod
    def pem_to_public_key(pem_key: str):
        """Parse a PEM-encoded public key string into a key object.

        Args:
            pem_key (str): The PEM-encoded public key.

        Returns:
            PublicKey: The parsed key object.
        """
        public_key = serialization.load_pem_public_key(pem_key.encode())
        return public_key

    @staticmethod
    def public_key_to_pem(public_key: RSAPublicKey):
        """Serialize a public key object to a PEM-encoded string.

        Args:
            public_key (PublicKey): The key object.

        Returns:
            str: The PEM-encoded public key.
        """
        pem = public_key.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        )
        return pem.decode()
decrypt_asymmetric(encrypted_text_hex, private_key_str) staticmethod

Entschlüsselt einen Text mit einem gegebenen privaten Schlüssel.

Parameters:

Name Type Description Default
encrypted_text_hex str

Der verschlüsselte Text als Hex-String.

required
private_key_str str

Der private Schlüssel als String.

required

Returns:

Name Type Description
str str

Der entschlüsselte Text.

Source code in toolboxv2/utils/security/cryp.py
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
@staticmethod
def decrypt_asymmetric(encrypted_text_hex: str, private_key_str: str) -> str:
    """Decrypt a hex-encoded RSA-OAEP (SHA-512) ciphertext.

    Args:
        encrypted_text_hex (str): Ciphertext as a hex string.
        private_key_str (str): PEM-encoded private key.

    Returns:
        str: The recovered plaintext, or "Invalid" on any failure.
    """
    try:
        loaded_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
        oaep = padding.OAEP(
            mgf=padding.MGF1(algorithm=hashes.SHA512()),
            algorithm=hashes.SHA512(),
            label=None,
        )
        plaintext = loaded_key.decrypt(bytes.fromhex(encrypted_text_hex), oaep)
        return plaintext.decode()
    except Exception as e:
        get_logger().error(f"Error decrypt_asymmetric {e}")
    return "Invalid"
decrypt_symmetric(encrypted_text, key, to_str=True, mute=False) staticmethod

Entschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

Parameters:

Name Type Description Default
encrypted_text str

Der zu entschlüsselnde Text.

required
key str

Der symmetrische Schlüssel.

required
to_str bool

default true returns str if false returns bytes

True

Returns: str: Der entschlüsselte Text.

Source code in toolboxv2/utils/security/cryp.py
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
@staticmethod
def decrypt_symmetric(encrypted_text: str, key: str, to_str=True, mute=False) -> str or bytes:
    """Decrypt a Fernet token with the given symmetric key.

    Args:
        encrypted_text (str): The token to decrypt.
        key (str): The symmetric key (str is encoded to bytes first).
        to_str (bool): default true returns str if false returns bytes
        mute (bool): accepted but unused (kept for caller compatibility)
    Returns:
        str or bytes: The decrypted plaintext; raises on bad key/token.
    """
    key_bytes = key.encode() if isinstance(key, str) else key
    raw = Fernet(key_bytes).decrypt(encrypted_text)
    return raw.decode() if to_str else raw
encrypt_asymmetric(text, public_key_str) staticmethod

Verschlüsselt einen Text mit einem gegebenen öffentlichen Schlüssel.

Parameters:

Name Type Description Default
text str

Der zu verschlüsselnde Text.

required
public_key_str str

Der öffentliche Schlüssel als String oder im pem format.

required

Returns:

Name Type Description
str str

Der verschlüsselte Text.

Source code in toolboxv2/utils/security/cryp.py
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
@staticmethod
def encrypt_asymmetric(text: str, public_key_str: str) -> str:
    """Encrypt `text` with RSA-OAEP (SHA-512) using a PEM public key.

    Args:
        text (str): Plaintext to encrypt.
        public_key_str (str): PEM-encoded public key.

    Returns:
        str: Hex-encoded ciphertext, or "Invalid" on any failure.
    """
    try:
        pub: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
        oaep = padding.OAEP(
            mgf=padding.MGF1(algorithm=hashes.SHA512()),
            algorithm=hashes.SHA512(),
            label=None,
        )
        ciphertext = pub.encrypt(text.encode(), oaep)
        return ciphertext.hex()
    except Exception as e:
        get_logger().error(f"Error encrypt_asymmetric {e}")
        return "Invalid"
encrypt_symmetric(text, key) staticmethod

Verschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

Parameters:

Name Type Description Default
text str

Der zu verschlüsselnde Text.

required
key str

Der symmetrische Schlüssel.

required

Returns:

Name Type Description
str str

Der verschlüsselte Text.

Source code in toolboxv2/utils/security/cryp.py
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
@staticmethod
def encrypt_symmetric(text: str or bytes, key: str) -> str:
    """Encrypt `text` with the given Fernet key.

    Args:
        text (str or bytes): Plaintext; str is UTF-8 encoded first.
        key (str): The symmetric key.

    Returns:
        str: The encrypted token, or "Error encrypt" on failure.
    """
    payload = text.encode() if isinstance(text, str) else text
    try:
        return Fernet(key.encode()).encrypt(payload).decode()
    except Exception as e:
        get_logger().error(f"Error encrypt_symmetric #{str(e)}#")
        return "Error encrypt"
generate_asymmetric_keys() staticmethod

Generiert ein Paar von öffentlichen und privaten Schlüsseln für die asymmetrische Verschlüsselung.

Parameters:

Name Type Description Default
seed int

Ein optionaler Seed-Wert. Standardmäßig None.

required

Returns:

Type Description
(str, str)

Ein Tupel aus öffentlichem und privatem Schlüssel.

Source code in toolboxv2/utils/security/cryp.py
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
@staticmethod
def generate_asymmetric_keys() -> (str, str):
    """Generate a 6144-bit RSA key pair for asymmetric encryption.

    Returns:
        (str, str): A tuple of (public key PEM, private key PEM).
    """
    priv = rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048 * 3,
    )

    # Serialize the private key (unencrypted PKCS#8) and the matching
    # public key (SubjectPublicKeyInfo) to PEM strings.
    pem_priv = priv.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption(),
    ).decode()

    pem_pub = priv.public_key().public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo,
    ).decode()

    return pem_pub, pem_priv
generate_seed() staticmethod

Erzeugt eine zufällige Zahl als Seed.

Returns:

Name Type Description
int int

Eine zufällige Zahl.

Source code in toolboxv2/utils/security/cryp.py
101
102
103
104
105
106
107
108
109
@staticmethod
def generate_seed() -> int:
    """
    Erzeugt eine zufällige Zahl als Seed.

    Returns:
        int: Eine zufällige Zahl.
    """
    return random.randint(2 ** 32 - 1, 2 ** 64 - 1)
generate_symmetric_key(as_str=True) staticmethod

Generiert einen Schlüssel für die symmetrische Verschlüsselung.

Returns:

Name Type Description
str str or bytes

Der generierte Schlüssel.

Source code in toolboxv2/utils/security/cryp.py
127
128
129
130
131
132
133
134
135
136
137
138
@staticmethod
def generate_symmetric_key(as_str=True) -> str or bytes:
    """Generate a key for symmetric (Fernet) encryption.

    Args:
        as_str (bool): When True (default) return str, else raw bytes.

    Returns:
        str or bytes: The generated key.
    """
    raw = Fernet.generate_key()
    return raw.decode() if as_str else raw
load_keys_from_files(directory='keys') staticmethod

Lädt die Schlüssel aus den Dateien. Der private Schlüssel wird mit dem Device Key entschlüsselt.

Parameters:

Name Type Description Default
directory str

Das Verzeichnis, aus dem die Schlüssel geladen werden sollen

'keys'

Returns:

Type Description
(str, str)

Ein Tupel aus öffentlichem und privatem Schlüssel

Raises:

Type Description
FileNotFoundError

Wenn die Schlüsseldateien nicht gefunden werden können

Source code in toolboxv2/utils/security/cryp.py
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
@staticmethod
def load_keys_from_files(directory: str = "keys") -> (str, str):
    """Load a key pair from `directory`.

    The private key on disk is encrypted with the device key and is
    decrypted before being returned.

    Args:
        directory (str): Directory containing the key files.

    Returns:
        (str, str): (public key, private key), or ("", "") when either
        file is missing.
    """
    pub_path = os.path.join(directory, "public_key.pem")
    priv_path = os.path.join(directory, "private_key.pem")

    # Missing files are not an error; signal with empty strings.
    if not (os.path.exists(pub_path) and os.path.exists(priv_path)):
        return "", ""

    device_key = DEVICE_KEY()

    with open(pub_path) as fh:
        public_key = fh.read()

    with open(priv_path) as fh:
        private_key = Code.decrypt_symmetric(fh.read(), device_key)

    return public_key, private_key
one_way_hash(text, salt='', pepper='') staticmethod

Erzeugt einen Hash eines gegebenen Textes mit Salt, Pepper und optional einem Seed.

Parameters:

Name Type Description Default
text str

Der zu hashende Text.

required
salt str

Der Salt-Wert.

''
pepper str

Der Pepper-Wert.

''
seed int

Ein optionaler Seed-Wert. Standardmäßig None.

required

Returns:

Name Type Description
str str

Der resultierende Hash-Wert.

Source code in toolboxv2/utils/security/cryp.py
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
@staticmethod
def one_way_hash(text: str, salt: str = '', pepper: str = '') -> str:
    """
    Erzeugt einen Hash eines gegebenen Textes mit Salt, Pepper und optional einem Seed.

    Args:
        text (str): Der zu hashende Text.
        salt (str): Der Salt-Wert.
        pepper (str): Der Pepper-Wert.
        seed (int, optional): Ein optionaler Seed-Wert. Standardmäßig None.

    Returns:
        str: Der resultierende Hash-Wert.
    """
    return hashlib.sha256((salt + text + pepper).encode()).hexdigest()
pem_to_public_key(pem_key) staticmethod

Konvertiert einen PEM-kodierten öffentlichen Schlüssel in ein PublicKey-Objekt.

Parameters:

Name Type Description Default
pem_key str

Der PEM-kodierte öffentliche Schlüssel.

required

Returns:

Name Type Description
PublicKey

Das PublicKey-Objekt.

Source code in toolboxv2/utils/security/cryp.py
422
423
424
425
426
427
428
429
430
431
432
433
434
@staticmethod
def pem_to_public_key(pem_key: str):
    """Parse a PEM-encoded public key string into a key object.

    Args:
        pem_key (str): The PEM-encoded public key.

    Returns:
        PublicKey: The parsed key object.
    """
    return serialization.load_pem_public_key(pem_key.encode())
public_key_to_pem(public_key) staticmethod

Konvertiert ein PublicKey-Objekt in einen PEM-kodierten String.

Parameters:

Name Type Description Default
public_key PublicKey

Das PublicKey-Objekt.

required

Returns:

Name Type Description
str

Der PEM-kodierte öffentliche Schlüssel.

Source code in toolboxv2/utils/security/cryp.py
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
@staticmethod
def public_key_to_pem(public_key: RSAPublicKey):
    """Serialize a public key object to a PEM-encoded string.

    Args:
        public_key (PublicKey): The key object.

    Returns:
        str: The PEM-encoded public key.
    """
    pem_bytes = public_key.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo,
    )
    return pem_bytes.decode()
save_keys_to_files(public_key, private_key, directory='keys') staticmethod

Speichert die generierten Schlüssel in separate Dateien. Der private Schlüssel wird mit dem Device Key verschlüsselt.

Parameters:

Name Type Description Default
public_key str

Der öffentliche Schlüssel im PEM-Format

required
private_key str

Der private Schlüssel im PEM-Format

required
directory str

Das Verzeichnis, in dem die Schlüssel gespeichert werden sollen

'keys'
Source code in toolboxv2/utils/security/cryp.py
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
@staticmethod
def save_keys_to_files(public_key: str, private_key: str, directory: str = "keys") -> None:
    """Persist a key pair to separate files in `directory`.

    The private key is encrypted with the device key before writing.

    Args:
        public_key (str): Public key in PEM format.
        private_key (str): Private key in PEM format.
        directory (str): Target directory (created if missing).
    """
    os.makedirs(directory, exist_ok=True)

    # Encrypt the private key with the device key before it touches disk.
    device_key = DEVICE_KEY()
    encrypted_private_key = Code.encrypt_symmetric(private_key, device_key)

    public_key_path = os.path.join(directory, "public_key.pem")
    private_key_path = os.path.join(directory, "private_key.pem")

    with open(public_key_path, "w") as fh:
        fh.write(public_key)

    with open(private_key_path, "w") as fh:
        fh.write(encrypted_private_key)

    print("Saved keys in ", public_key_path)

MainTool

Source code in toolboxv2/utils/system/main_tool.py
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
class MainTool:
    """Base class for toolbox modules with two-phase initialization.

    ``__init__`` stores the constructor arguments and runs a synchronous
    ``on_start`` callback; awaiting the instance (``await tool``) runs
    ``__ainit__`` exactly once for the async part of setup.
    """
    toolID: str = ""
    # app = None
    interface = None
    spec = "app"
    name = ""
    color = "Bold"
    stuf = False

    def __init__(self, *args, **kwargs):
        """
        Standard constructor used for arguments pass
        Do not override. Use __ainit__ instead
        """
        self.__storedargs = args, kwargs  # replayed by __initobj on first await
        self.tools = kwargs.get("tool", {})
        self.logger = kwargs.get("logs", get_logger())
        self.color = kwargs.get("color", "WHITE")
        self.todo = kwargs.get("load", kwargs.get("on_start", lambda: None))
        # builtin callable() replaces isinstance(..., Callable): equivalent
        # check, but robust regardless of where Callable was imported from.
        if "on_exit" in kwargs and callable(kwargs.get("on_exit")):
            self.on_exit = self.app.tb(
                mod_name=self.name,
                name=kwargs.get("on_exit").__name__,
                version=self.version if hasattr(self, 'version') else "0.0.0",
            )(kwargs.get("on_exit"))
        self.async_initialized = False
        if self.todo:
            try:
                if inspect.iscoroutinefunction(self.todo):
                    pass  # coroutine on_start is deferred to __ainit__
                else:
                    self.todo()
                get_logger().info(f"{self.name} on load suspended")
            except Exception as e:
                get_logger().error(f"Error loading mod {self.name} {e}")
                if self.app.debug:
                    import traceback
                    traceback.print_exc()
        else:
            get_logger().info(f"{self.name} no load require")

    async def __ainit__(self, *args, **kwargs):
        """Async phase of construction (runs once via ``await tool``).

        Requires kwargs 'v' (version) and 'name'; raises KeyError if absent.
        Runs a coroutine ``on_start`` callback here (sync callbacks already
        ran in ``__init__``).
        """
        self.version = kwargs["v"]
        self.tools = kwargs.get("tool", {})
        self.name = kwargs["name"]
        self.logger = kwargs.get("logs", get_logger())
        self.color = kwargs.get("color", "WHITE")
        self.todo = kwargs.get("load", kwargs.get("on_start"))
        if not hasattr(self, 'config'):
            self.config = {}
        self.user = None
        self.description = "A toolbox mod" if kwargs.get("description") is None else kwargs.get("description")
        if MainTool.interface is None:
            MainTool.interface = self.app.interface_type
        # Result.default(self.app.interface)

        if self.todo:
            try:
                if inspect.iscoroutinefunction(self.todo):
                    await self.todo()
                else:
                    pass  # sync on_start already executed in __init__
                await asyncio.sleep(0.1)
                get_logger().info(f"{self.name} on load suspended")
            except Exception as e:
                get_logger().error(f"Error loading mod {self.name} {e}")
                if self.app.debug:
                    import traceback
                    traceback.print_exc()
        else:
            get_logger().info(f"{self.name} no load require")
        self.app.print(f"TOOL : {self.spec}.{self.name} online")

    @property
    def app(self):
        """Resolve the global App instance (looked up on every access)."""
        return get_app(
            from_=f"{self.spec}.{self.name}|{self.toolID if self.toolID else '*' + MainTool.toolID} {self.interface if self.interface else MainTool.interface}")

    @app.setter
    def app(self, v):
        raise PermissionError(f"You cannot set the App Instance! {v=}")

    @staticmethod
    def return_result(error: ToolBoxError = ToolBoxError.none,
                      exec_code: int = 0,
                      help_text: str = "",
                      data_info=None,
                      data=None,
                      data_to=None):
        """Build a Result, defaulting data_to to the active interface."""
        if data_to is None:
            data_to = MainTool.interface if MainTool.interface is not None else ToolBoxInterfaces.cli

        if data is None:
            data = {}

        if data_info is None:
            data_info = {}

        return Result(
            error,
            ToolBoxResult(data_info=data_info, data=data, data_to=data_to),
            ToolBoxInfo(exec_code=exec_code, help_text=help_text)
        )

    def print(self, message, end="\n", **kwargs):
        """Print `message` prefixed with the colored tool name; no-op when
        ``self.stuf`` is set."""
        if self.stuf:
            return

        self.app.print(Style.style_dic[self.color] + self.name + Style.style_dic["END"] + ":", message, end=end,
                       **kwargs)

    def add_str_to_config(self, command):
        """Store a (key, value) pair in self.config.

        Returns False on malformed input; returns None on success (callers
        must not truth-test the success path).
        """
        if len(command) != 2:
            self.logger.error('Invalid command must be key value')
            return False
        self.config[command[0]] = command[1]

    def webInstall(self, user_instance, construct_render) -> str:
        """"Returns a web installer for the given user instance and construct render template"""
        # NOTE(review): no implementation — returns None despite the `-> str`
        # annotation; subclasses are expected to override.

    def get_version(self) -> str:
        """Return the tool's version string (set during __ainit__)."""
        return self.version

    async def get_user(self, username: str) -> Result:
        """Look up a user by name via the auth manager module."""
        return await self.app.a_run_any(CLOUDM_AUTHMANAGER.GET_USER_BY_NAME, username=username, get_results=True)

    async def __initobj(self):
        """Crutch used for __await__ after spawning"""
        assert not self.async_initialized
        self.async_initialized = True
        # pass the parameters to __ainit__ that passed to __init__
        await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
        return self

    def __await__(self):
        return self.__initobj().__await__()
__init__(*args, **kwargs)

Standard constructor used for argument passing. Do not override; use `__ainit__` instead.

Source code in toolboxv2/utils/system/main_tool.py
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
def __init__(self, *args, **kwargs):
    """
    Standard constructor used for argument passing.
    Do not override; use __ainit__ instead.
    """
    # Stash the call arguments; __initobj forwards them to __ainit__ on await.
    self.__storedargs = args, kwargs
    self.tools = kwargs.get("tool", {})
    self.logger = kwargs.get("logs", get_logger())
    self.color = kwargs.get("color", "WHITE")
    # Startup hook: "load" takes precedence over "on_start"; defaults to a no-op.
    self.todo = kwargs.get("load", kwargs.get("on_start", lambda: None))
    if "on_exit" in kwargs and isinstance(kwargs.get("on_exit"), Callable):
        # Register the exit hook through the app's tb decorator under this mod's name.
        self.on_exit =self.app.tb(
            mod_name=self.name,
            name=kwargs.get("on_exit").__name__,
            version=self.version if hasattr(self, 'version') else "0.0.0",
        )(kwargs.get("on_exit"))
    self.async_initialized = False
    if self.todo:
        try:
            # Coroutine hooks are skipped here (presumably run during async init
            # elsewhere — TODO confirm); synchronous hooks run immediately.
            if inspect.iscoroutinefunction(self.todo):
                pass
            else:
                self.todo()
            get_logger().info(f"{self.name} on load suspended")
        except Exception as e:
            get_logger().error(f"Error loading mod {self.name} {e}")
            if self.app.debug:
                import traceback
                traceback.print_exc()
    else:
        get_logger().info(f"{self.name} no load require")
__initobj() async

Crutch used for `__await__` after spawning.

Source code in toolboxv2/utils/system/main_tool.py
174
175
176
177
178
179
180
async def __initobj(self):
    """Crutch used for __await__ after spawning."""
    # NOTE(review): `assert` is stripped under `python -O`; consider raising instead.
    assert not self.async_initialized
    self.async_initialized = True
    # pass the parameters to __ainit__ that passed to __init__
    await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
    return self
get_version()

Returns the version.

Source code in toolboxv2/utils/system/main_tool.py
167
168
169
def get_version(self) -> str:
    """Return this tool's version string (``self.version``)."""
    return self.version
webInstall(user_instance, construct_render)

Returns a web installer for the given user instance and construct render template.

Source code in toolboxv2/utils/system/main_tool.py
164
165
def webInstall(self, user_instance, construct_render) -> str:
    """Return a web installer for the given user instance and construct render template."""

Result

Source code in toolboxv2/utils/system/types.py
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
class Result:
    """Unified return value for toolbox functions.

    Bundles an error enum (``ToolBoxError``), the payload (``ToolBoxResult``)
    and execution info (``ToolBoxInfo``), plus an optional ``origin``.
    Classmethod factories (``ok``, ``json``, ``html``, ``stream``, ...) build
    the common response shapes.

    Fixes vs. previous revision:
      * ``file()`` now embeds the actual *filename* in ``data_info`` (the old
        f-string had no placeholder).
      * ``html()`` closes the wrapper with ``</div>`` (was ``<div>``) and its
        "already wrapped" check compares against a well-formed marker string
        (the old one was mis-quoted and could never match).
    """

    # Optional background task attached via .task(); exposed through .bg_task.
    _task = None

    def __init__(self,
                 error: ToolBoxError,
                 result: ToolBoxResult,
                 info: ToolBoxInfo,
                 origin: Any | None = None,
                 ):
        self.error: ToolBoxError = error
        self.result: ToolBoxResult = result
        self.info: ToolBoxInfo = info
        self.origin = origin

    def as_result(self):
        """Return self (already a Result)."""
        return self

    def as_dict(self):
        """Serialize to a plain dict, unwrapping Enum members to their values."""
        return {
            "error": self.error.value if isinstance(self.error, Enum) else self.error,
            "result": {
                "data_to": self.result.data_to.value if isinstance(self.result.data_to, Enum) else self.result.data_to,
                "data_info": self.result.data_info,
                "data": self.result.data,
                "data_type": self.result.data_type
            } if self.result else None,
            "info": {
                "exec_code": self.info.exec_code,  # exec_code is mapped to HTTP response codes downstream
                "help_text": self.info.help_text
            } if self.info else None,
            "origin": self.origin
        }

    def set_origin(self, origin):
        """Set the origin once; raises if an origin is already present."""
        if self.origin is not None:
            raise ValueError("You cannot Change the origin of a Result!")
        self.origin = origin
        return self

    def set_dir_origin(self, name, extras="assets/"):
        """Set the origin to a mod asset directory path; raises if already set."""
        if self.origin is not None:
            raise ValueError("You cannot Change the origin of a Result!")
        self.origin = f"mods/{name}/{extras}"
        return self

    def is_error(self):
        """True when this (or a nested) Result represents an error."""
        # Nested Results delegate the decision.
        if _test_is_result(self.result.data):
            return self.result.data.is_error()
        if self.error == ToolBoxError.none:
            return False
        # exec_code 0 and 200 both count as success.
        if self.info.exec_code == 0:
            return False
        return self.info.exec_code != 200

    def is_ok(self):
        """Inverse of :meth:`is_error`."""
        return not self.is_error()

    def is_data(self):
        """True when a data payload is present."""
        return self.result.data is not None

    def to_api_result(self):
        """Convert to an ``ApiResult`` (Pydantic/BM) representation."""
        return ApiResult(
            error=self.error.value if isinstance(self.error, Enum) else self.error,
            result=ToolBoxResultBM(
                data_to=self.result.data_to.value if isinstance(self.result.data_to, Enum) else self.result.data_to,
                data_info=self.result.data_info,
                data=self.result.data,
                data_type=self.result.data_type
            ) if self.result else None,
            info=ToolBoxInfoBM(
                exec_code=self.info.exec_code,  # exec_code is mapped to HTTP response codes downstream
                help_text=self.info.help_text
            ) if self.info else None,
            origin=self.origin
        )

    def task(self, task):
        """Attach a background task to this Result (fluent)."""
        self._task = task
        return self

    @staticmethod
    def result_from_dict(error: str, result: dict, info: dict, origin: list | str | None):
        """Rebuild a Result from its dict parts, with '404' fallbacks for missing fields."""
        return ApiResult(
            error=error if isinstance(error, Enum) else error,
            result=ToolBoxResultBM(
                data_to=result.get('data_to') if isinstance(result.get('data_to'), Enum) else result.get('data_to'),
                data_info=result.get('data_info', '404'),
                data=result.get('data'),
                data_type=result.get('data_type', '404'),
            ) if result else ToolBoxResultBM(
                data_to=ToolBoxInterfaces.cli.value,
                data_info='',
                data='404',
                data_type='404',
            ),
            info=ToolBoxInfoBM(
                exec_code=info.get('exec_code', 404),
                help_text=info.get('help_text', '404')
            ) if info else ToolBoxInfoBM(
                exec_code=404,
                help_text='404'
            ),
            origin=origin
        ).as_result()

    @classmethod
    def stream(cls,
               stream_generator: Any,  # Renamed from source for clarity
               content_type: str = "text/event-stream",  # Default to SSE
               headers: dict | None = None,
               info: str = "OK",
               interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
               cleanup_func: Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None = None):
        """
        Create a streaming response Result. Handles SSE and other stream types.

        Args:
            stream_generator: Any stream source (async generator, sync generator, iterable, or single item).
            content_type: Content-Type header (default: text/event-stream for SSE).
            headers: Additional HTTP headers for the response.
            info: Help text for the result.
            interface: Interface to send data to.
            cleanup_func: Optional function for cleanup.

        Returns:
            A Result object configured for streaming.
        """
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=0, help_text=info)

        final_generator: AsyncGenerator[str, None]

        if content_type == "text/event-stream":
            # For SSE, always use SSEGenerator.create_sse_stream to wrap the source.
            # SSEGenerator.create_sse_stream handles various types of stream_generator internally.
            final_generator = SSEGenerator.create_sse_stream(source=stream_generator, cleanup_func=cleanup_func)

            # Standard SSE headers for the HTTP response itself.
            # These are stored on the Result; the Rust side decides how to use them.
            standard_sse_headers = {
                "Cache-Control": "no-cache",  # SSE specific
                "Connection": "keep-alive",  # SSE specific
                "X-Accel-Buffering": "no",  # Useful for proxies with SSE
                # Content-Type is implicitly text/event-stream, will be in streaming_data below
            }
            all_response_headers = standard_sse_headers.copy()
            if headers:
                all_response_headers.update(headers)
        else:
            # For non-SSE streams: wrap sync iterables to async, pass async
            # generators through, and turn single items into one-shot generators.
            if inspect.isgenerator(stream_generator) or \
                (not isinstance(stream_generator, str) and hasattr(stream_generator, '__iter__')):
                final_generator = SSEGenerator.wrap_sync_generator(stream_generator)  # Simple async wrapper
            elif inspect.isasyncgen(stream_generator):
                final_generator = stream_generator
            else:  # Single item or string
                async def _single_item_gen():
                    yield stream_generator

                final_generator = _single_item_gen()
            all_response_headers = headers if headers else {}

        # Prepare streaming data to be stored in the Result object.
        streaming_data = {
            "type": "stream",  # Indicator for Rust side
            "generator": final_generator,
            "content_type": content_type,  # Let Rust know the intended content type
            "headers": all_response_headers  # Intended HTTP headers for the overall response
        }

        result_payload = ToolBoxResult(
            data_to=interface,
            data=streaming_data,
            data_info="Streaming response" if content_type != "text/event-stream" else "SSE Event Stream",
            data_type="stream"  # Generic type for Rust to identify it needs to stream from 'generator'
        )

        return cls(error=error, info=info_obj, result=result_payload)

    @classmethod
    def sse(cls,
            stream_generator: Any,
            info: str = "OK",
            interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
            cleanup_func: Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None = None,
            # http_headers: Optional[dict] = None # If we want to allow overriding default SSE HTTP headers
            ):
        """
        Create a Server-Sent Events (SSE) streaming response Result.

        Args:
            stream_generator: A source yielding individual data items. This can be an
                              async generator, sync generator, iterable, or a single item.
                              Each item will be formatted as an SSE event.
            info: Optional help text for the Result.
            interface: Optional ToolBoxInterface to target.
            cleanup_func: Optional cleanup function to run when the stream ends or is cancelled.

        Returns:
            A Result object configured for SSE streaming.
        """
        # Result.stream handles SSEGenerator.create_sse_stream and the default
        # SSE HTTP headers when content_type is "text/event-stream".
        return cls.stream(
            stream_generator=stream_generator,
            content_type="text/event-stream",
            # headers=http_headers, # Pass if we add http_headers param
            info=info,
            interface=interface,
            cleanup_func=cleanup_func
        )

    @classmethod
    def default(cls, interface=ToolBoxInterfaces.native):
        """Create an empty placeholder Result (exec_code -1, no data)."""
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=-1, help_text="")
        result = ToolBoxResult(data_to=interface)
        return cls(error=error, info=info, result=result)

    @classmethod
    def json(cls, data, info="OK", interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None):
        """Create a JSON response Result.

        Note: a truthy ``status_code`` overrides ``exec_code``; ``0``/``None``
        fall back to ``exec_code``.
        """
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=status_code or exec_code, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=data,
            data_info="JSON response",
            data_type="json"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def text(cls, text_data, content_type="text/plain",exec_code=None,status=200, info="OK", interface=ToolBoxInterfaces.remote, headers=None):
        """Create a text response Result with specific content type.

        When *headers* is given the call is delegated to :meth:`html` so the
        headers can be carried in a "special_html" payload.
        """
        if headers is not None:
            return cls.html(text_data, status= exec_code or status, info=info, headers=headers)
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=exec_code or status, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=text_data,
            data_info="Text response",
            data_type=content_type
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def binary(cls, data, content_type="application/octet-stream", download_name=None, info="OK",
               interface=ToolBoxInterfaces.remote):
        """Create a binary data response Result."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=0, help_text=info)

        # Bundle the raw bytes together with their metadata.
        binary_data = {
            "data": data,
            "content_type": content_type,
            "filename": download_name
        }

        result = ToolBoxResult(
            data_to=interface,
            data=binary_data,
            data_info=f"Binary response: {download_name}" if download_name else "Binary response",
            data_type="binary"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def file(cls, data, filename, content_type=None, info="OK", interface=ToolBoxInterfaces.remote):
        """Create a file download response Result.

        Args:
            data: File data as bytes or base64 string
            filename: Name of the file for download
            content_type: MIME type of the file (auto-detected if None)
            info: Response info text
            interface: Target interface

        Returns:
            Result object configured for file download
        """
        import base64
        import mimetypes

        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=200, help_text=info)

        # Auto-detect content type if not provided.
        if content_type is None:
            content_type, _ = mimetypes.guess_type(filename)
            if content_type is None:
                content_type = "application/octet-stream"

        # Ensure data is a base64-encoded string (as expected by the Rust server).
        if isinstance(data, bytes):
            base64_data = base64.b64encode(data).decode('utf-8')
        elif isinstance(data, str):
            # Assume it's already base64 encoded.
            base64_data = data
        else:
            raise ValueError("File data must be bytes or base64 string")

        result = ToolBoxResult(
            data_to=interface,
            data=base64_data,  # Rust expects base64 string for "file" type
            # BUG FIX: previously a placeholder f-string with no field.
            data_info=f"File download: {filename}",
            data_type="file"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def redirect(cls, url, status_code=302, info="Redirect", interface=ToolBoxInterfaces.remote):
        """Create a redirect response (302 Found by default)."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=status_code, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=url,
            data_info="Redirect response",
            data_type="redirect"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def ok(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.native):
        """Create a success Result carrying *data*."""
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=0, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def html(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.remote, data_type="html",status=200, headers=None, row=False):
        """Create an HTML response Result.

        Unless *row* is set, the payload is wrapped in the app's main-content
        div and prefixed with the web context when either is missing.
        """
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=status, help_text=info)
        from ...utils.system.getting_and_closing_app import get_app

        # BUG FIX: the original "already wrapped" marker was mis-quoted and
        # never matched, and the wrapper closed with `<div>` instead of `</div>`.
        if not row and '<div class="main-content"' not in data:
            data = f'<div class="main-content frosted-glass">{data}</div>'
        if not row and not get_app().web_context() in data:
            data = get_app().web_context() + data

        if isinstance(headers, dict):
            result = ToolBoxResult(data_to=interface, data={'html':data,'headers':headers}, data_info=data_info,
                                   data_type="special_html")
        else:
            result = ToolBoxResult(data_to=interface, data=data, data_info=data_info,
                                   data_type=data_type if data_type is not None else type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def future(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.future):
        """Create a Result whose data resolves later (future/coroutine payload)."""
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=0, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type="future")
        return cls(error=error, info=info, result=result)

    @classmethod
    def custom_error(cls, data=None, data_info="", info="", exec_code=-1, interface=ToolBoxInterfaces.native):
        """Create a custom error Result (local/native default)."""
        error = ToolBoxError.custom_error
        info = ToolBoxInfo(exec_code=exec_code, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def error(cls, data=None, data_info="", info="", exec_code=450, interface=ToolBoxInterfaces.remote):
        """Create a remote-facing error Result (exec_code 450 by default)."""
        error = ToolBoxError.custom_error
        info = ToolBoxInfo(exec_code=exec_code, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def default_user_error(cls, info="", exec_code=-3, interface=ToolBoxInterfaces.native, data=None):
        """Create an input-error Result (caller supplied bad input)."""
        error = ToolBoxError.input_error
        info = ToolBoxInfo(exec_code, info)
        result = ToolBoxResult(data_to=interface, data=data, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def default_internal_error(cls, info="", exec_code=-2, interface=ToolBoxInterfaces.native, data=None):
        """Create an internal-error Result (something went wrong on our side)."""
        error = ToolBoxError.internal_error
        info = ToolBoxInfo(exec_code, info)
        result = ToolBoxResult(data_to=interface, data=data, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    def print(self, show=True, show_data=True, prifix=""):
        """Render a human-readable summary; prints it unless show=False."""
        data = '\n' + f"{((prifix + f'Data_{self.result.data_type}: ' + str(self.result.data) if self.result.data is not None else 'NO Data') if not isinstance(self.result.data, Result) else self.result.data.print(show=False, show_data=show_data, prifix=prifix + '-')) if show_data else 'Data: private'}"
        origin = '\n' + f"{prifix + 'Origin: ' + str(self.origin) if self.origin is not None else 'NO Origin'}"
        text = (f"Function Exec code: {self.info.exec_code}"
                f"\n{prifix}Info's:"
                f" {self.info.help_text} {'<|> ' + str(self.result.data_info) if self.result.data_info is not None else ''}"
                f"{origin}{(data[:100]+'...') if not data.endswith('NO Data') else ''}\n")
        if not show:
            return text
        print("\n======== Result ========\n" + text + "------- EndOfD -------")
        return self

    def log(self, show_data=True, prifix=""):
        """Write the summary to the debug log (newlines collapsed)."""
        from toolboxv2 import get_logger
        get_logger().debug(self.print(show=False, show_data=show_data, prifix=prifix).replace("\n", " - "))
        return self

    def __str__(self):
        return self.print(show=False, show_data=True)

    def get(self, key=None, default=None):
        """Return the payload, unwrapping nested Results and dict keys."""
        data = self.result.data
        if isinstance(data, Result):
            return data.get(key=key, default=default)
        if key is not None and isinstance(data, dict):
            return data.get(key, default)
        return data if data is not None else default

    async def aget(self, key=None, default=None):
        """Async variant of :meth:`get` that awaits future/coroutine payloads."""
        if asyncio.isfuture(self.result.data) or asyncio.iscoroutine(self.result.data) or (
            isinstance(self.result.data_to, Enum) and self.result.data_to.name == ToolBoxInterfaces.future.name):
            data = await self.result.data
        else:
            data = self.get(key=None, default=None)
        if isinstance(data, Result):
            return data.get(key=key, default=default)
        if key is not None and isinstance(data, dict):
            return data.get(key, default)
        return data if data is not None else default

    def lazy_return(self, _=0, data=None, **kwargs):
        """On success return data (or self); otherwise dispatch per flag.

        Flags: 'raise' raises, 'logg' logs, 'user'/'intern' wrap *data* in the
        corresponding default error Result, anything else in a custom error.
        """
        flags = ['raise', 'logg', 'user', 'intern']
        flag = flags[_] if isinstance(_, int) else _
        if self.info.exec_code == 0:
            return self if data is None else data if _test_is_result(data) else self.ok(data=data, **kwargs)
        if flag == 'raise':
            raise ValueError(self.print(show=False))
        if flag == 'logg':
            from .. import get_logger
            get_logger().error(self.print(show=False))

        if flag == 'user':
            return self if data is None else data if _test_is_result(data) else self.default_user_error(data=data,
                                                                                                        **kwargs)
        if flag == 'intern':
            return self if data is None else data if _test_is_result(data) else self.default_internal_error(data=data,
                                                                                                            **kwargs)

        return self if data is None else data if _test_is_result(data) else self.custom_error(data=data, **kwargs)

    @property
    def bg_task(self):
        """Background task attached via :meth:`task`, or None."""
        return self._task
binary(data, content_type='application/octet-stream', download_name=None, info='OK', interface=ToolBoxInterfaces.remote) classmethod

Create a binary data response Result.

Source code in toolboxv2/utils/system/types.py
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
@classmethod
def binary(cls, data, content_type="application/octet-stream", download_name=None, info="OK",
           interface=ToolBoxInterfaces.remote):
    """Create a binary data response Result.

    Args:
        data: Raw payload (stored as-is).
        content_type: MIME type reported for the payload.
        download_name: Optional filename; when set it is echoed in data_info.
        info: Help text for the result info.
        interface: Target interface for the response.
    """
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=0, help_text=info)

    # Create a dictionary with binary data and metadata
    binary_data = {
        "data": data,
        "content_type": content_type,
        "filename": download_name
    }

    result = ToolBoxResult(
        data_to=interface,
        data=binary_data,
        data_info=f"Binary response: {download_name}" if download_name else "Binary response",
        data_type="binary"
    )

    return cls(error=error, info=info_obj, result=result)
file(data, filename, content_type=None, info='OK', interface=ToolBoxInterfaces.remote) classmethod

Create a file download response Result.

Parameters:

Name Type Description Default
data

File data as bytes or base64 string

required
filename

Name of the file for download

required
content_type

MIME type of the file (auto-detected if None)

None
info

Response info text

'OK'
interface

Target interface

remote

Returns:

Type Description

Result object configured for file download

Source code in toolboxv2/utils/system/types.py
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
@classmethod
def file(cls, data, filename, content_type=None, info="OK", interface=ToolBoxInterfaces.remote):
    """Create a file download response Result.

    Args:
        data: File data as bytes or base64 string
        filename: Name of the file for download
        content_type: MIME type of the file (auto-detected if None)
        info: Response info text
        interface: Target interface

    Returns:
        Result object configured for file download
    """
    import base64
    import mimetypes

    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=200, help_text=info)

    # Auto-detect content type if not provided
    if content_type is None:
        content_type, _ = mimetypes.guess_type(filename)
        if content_type is None:
            content_type = "application/octet-stream"

    # Ensure data is base64 encoded string (as expected by Rust server)
    if isinstance(data, bytes):
        base64_data = base64.b64encode(data).decode('utf-8')
    elif isinstance(data, str):
        # Assume it's already base64 encoded
        base64_data = data
    else:
        raise ValueError("File data must be bytes or base64 string")

    result = ToolBoxResult(
        data_to=interface,
        data=base64_data,  # Rust expects base64 string for "file" type
        # BUG FIX: previously an f-string with no placeholder ("File download: (unknown)").
        data_info=f"File download: {filename}",
        data_type="file"
    )

    return cls(error=error, info=info_obj, result=result)
json(data, info='OK', interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None) classmethod

Create a JSON response Result.

Source code in toolboxv2/utils/system/types.py
844
845
846
847
848
849
850
851
852
853
854
855
856
857
@classmethod
def json(cls, data, info="OK", interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None):
    """Create a JSON response Result.

    Note:
        A truthy ``status_code`` overrides ``exec_code``; ``None`` or ``0``
        falls back to ``exec_code``.
    """
    error = ToolBoxError.none
    # `or` means a falsy status_code (None/0) falls through to exec_code.
    info_obj = ToolBoxInfo(exec_code=status_code or exec_code, help_text=info)

    result = ToolBoxResult(
        data_to=interface,
        data=data,
        data_info="JSON response",
        data_type="json"
    )

    return cls(error=error, info=info_obj, result=result)
redirect(url, status_code=302, info='Redirect', interface=ToolBoxInterfaces.remote) classmethod

Create a redirect response.

Source code in toolboxv2/utils/system/types.py
943
944
945
946
947
948
949
950
951
952
953
954
955
956
@classmethod
def redirect(cls, url, status_code=302, info="Redirect", interface=ToolBoxInterfaces.remote):
    """Create a redirect response (302 Found by default); *url* is the target."""
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=status_code, help_text=info)

    result = ToolBoxResult(
        data_to=interface,
        data=url,
        data_info="Redirect response",
        data_type="redirect"
    )

    return cls(error=error, info=info_obj, result=result)
sse(stream_generator, info='OK', interface=ToolBoxInterfaces.remote, cleanup_func=None) classmethod

Create an Server-Sent Events (SSE) streaming response Result.

Parameters:

Name Type Description Default
stream_generator Any

A source yielding individual data items. This can be an async generator, sync generator, iterable, or a single item. Each item will be formatted as an SSE event.

required
info str

Optional help text for the Result.

'OK'
interface ToolBoxInterfaces

Optional ToolBoxInterface to target.

remote
cleanup_func Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None

Optional cleanup function to run when the stream ends or is cancelled.

None
#http_headers

Optional dictionary of custom HTTP headers for the SSE response.

required

Returns:

Type Description

A Result object configured for SSE streaming.

Source code in toolboxv2/utils/system/types.py
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
@classmethod
def sse(cls,
        stream_generator: Any,
        info: str = "OK",
        interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
        cleanup_func: Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None = None,
        # http_headers: Optional[dict] = None # If we want to allow overriding default SSE HTTP headers
        ):
    """
    Create a Server-Sent Events (SSE) streaming response Result.

    Thin wrapper over :meth:`stream` pinned to ``text/event-stream``.

    Args:
        stream_generator: A source yielding individual data items. This can be an
                          async generator, sync generator, iterable, or a single item.
                          Each item will be formatted as an SSE event.
        info: Optional help text for the Result.
        interface: Optional ToolBoxInterface to target.
        cleanup_func: Optional cleanup function to run when the stream ends or is cancelled.

    Returns:
        A Result object configured for SSE streaming.
    """
    # Result.stream will handle calling SSEGenerator.create_sse_stream
    # and setting appropriate default headers for SSE when content_type is "text/event-stream".
    return cls.stream(
        stream_generator=stream_generator,
        content_type="text/event-stream",
        # headers=http_headers, # Pass if we add http_headers param
        info=info,
        interface=interface,
        cleanup_func=cleanup_func
    )
stream(stream_generator, content_type='text/event-stream', headers=None, info='OK', interface=ToolBoxInterfaces.remote, cleanup_func=None) classmethod

Create a streaming response Result. Handles SSE and other stream types.

Parameters:

Name Type Description Default
stream_generator Any

Any stream source (async generator, sync generator, iterable, or single item).

required
content_type str

Content-Type header (default: text/event-stream for SSE).

'text/event-stream'
headers dict | None

Additional HTTP headers for the response.

None
info str

Help text for the result.

'OK'
interface ToolBoxInterfaces

Interface to send data to.

remote
cleanup_func Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None

Optional function for cleanup.

None

Returns:

Type Description

A Result object configured for streaming.

Source code in toolboxv2/utils/system/types.py
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
@classmethod
def stream(cls,
           stream_generator: Any,  # Renamed from source for clarity
           content_type: str = "text/event-stream",  # Default to SSE
           headers: dict | None = None,
           info: str = "OK",
           interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
           cleanup_func: Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None = None):
    """
    Create a streaming response Result. Handles SSE and other stream types.

    Args:
        stream_generator: Any stream source (async generator, sync generator, iterable, or single item).
        content_type: Content-Type header (default: text/event-stream for SSE).
        headers: Additional HTTP headers for the response.
        info: Help text for the result.
        interface: Interface to send data to.
        cleanup_func: Optional function for cleanup.

    Returns:
        A Result object configured for streaming.
    """
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=0, help_text=info)

    # Annotation only; assigned in both branches below.
    final_generator: AsyncGenerator[str, None]

    if content_type == "text/event-stream":
        # For SSE, always use SSEGenerator.create_sse_stream to wrap the source.
        # SSEGenerator.create_sse_stream handles various types of stream_generator internally.
        final_generator = SSEGenerator.create_sse_stream(source=stream_generator, cleanup_func=cleanup_func)

        # Standard SSE headers for the HTTP response itself
        # These will be stored in the Result object. Rust side decides how to use them.
        standard_sse_headers = {
            "Cache-Control": "no-cache",  # SSE specific
            "Connection": "keep-alive",  # SSE specific
            "X-Accel-Buffering": "no",  # Useful for proxies with SSE
            # Content-Type is implicitly text/event-stream, will be in streaming_data below
        }
        # Caller-supplied headers override the SSE defaults on key collision.
        all_response_headers = standard_sse_headers.copy()
        if headers:
            all_response_headers.update(headers)
    else:
        # For non-SSE streams.
        # If stream_generator is sync, wrap it to be async.
        # If already async or single item, it will be handled.
        # Rust's stream_generator in ToolboxClient seems to handle both sync/async Python generators.
        # For consistency with how SSEGenerator does it, we can wrap sync ones.
        # NOTE(review): this check runs before the asyncgen check; dict/bytes also
        # have __iter__, so a single dict or bytes item is treated as a sync
        # iterable (iterated element-wise) rather than as one item — confirm intended.
        if inspect.isgenerator(stream_generator) or \
            (not isinstance(stream_generator, str) and hasattr(stream_generator, '__iter__')):
            final_generator = SSEGenerator.wrap_sync_generator(stream_generator)  # Simple async wrapper
        elif inspect.isasyncgen(stream_generator):
            final_generator = stream_generator
        else:  # Single item or string
            async def _single_item_gen():
                yield stream_generator

            final_generator = _single_item_gen()
        # NOTE(review): cleanup_func is only honored in the SSE branch above;
        # non-SSE streams ignore it — confirm whether that is intentional.
        all_response_headers = headers if headers else {}

    # Prepare streaming data to be stored in the Result object
    streaming_data = {
        "type": "stream",  # Indicator for Rust side
        "generator": final_generator,
        "content_type": content_type,  # Let Rust know the intended content type
        "headers": all_response_headers  # Intended HTTP headers for the overall response
    }

    result_payload = ToolBoxResult(
        data_to=interface,
        data=streaming_data,
        data_info="Streaming response" if content_type != "text/event-stream" else "SSE Event Stream",
        data_type="stream"  # Generic type for Rust to identify it needs to stream from 'generator'
    )

    return cls(error=error, info=info_obj, result=result_payload)
text(text_data, content_type='text/plain', exec_code=None, status=200, info='OK', interface=ToolBoxInterfaces.remote, headers=None) classmethod

Create a text response Result with specific content type.

Source code in toolboxv2/utils/system/types.py
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
@classmethod
def text(cls, text_data, content_type="text/plain",exec_code=None,status=200, info="OK", interface=ToolBoxInterfaces.remote, headers=None):
    """Create a text Result carrying *text_data* under the given content type.

    When custom *headers* are supplied, the call is routed through html(),
    which is the variant that knows how to attach response headers.
    The effective status is exec_code when given, else status.
    """
    if headers is not None:
        return cls.html(text_data, status=exec_code or status, info=info, headers=headers)

    payload = ToolBoxResult(
        data_to=interface,
        data=text_data,
        data_info="Text response",
        data_type=content_type,
    )
    return cls(
        error=ToolBoxError.none,
        info=ToolBoxInfo(exec_code=exec_code or status, help_text=info),
        result=payload,
    )

Singleton

Singleton metaclass for ensuring only one instance of a class.

Source code in toolboxv2/utils/singelton_class.py
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
class Singleton(type):
    """Metaclass that caches exactly one instance per class.

    The first instantiation builds the object and records the args/kwargs
    it was created with; every subsequent call returns the same cached
    instance, silently ignoring any new arguments.
    """

    _instances = {}
    _kwargs = {}
    _args = {}

    def __call__(cls, *args, **kwargs):
        # EAFP: the cached-instance path is the common case.
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super().__call__(*args, **kwargs)
            cls._instances[cls] = instance
            cls._args[cls] = args
            cls._kwargs[cls] = kwargs
            return instance

Spinner

Enhanced Spinner with tqdm-like line rendering.

Source code in toolboxv2/utils/extras/Style.py
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
class Spinner:
    """
    Enhanced Spinner with tqdm-like line rendering.
    """
    SYMBOL_SETS = {
        "c": ["◐", "◓", "◑", "◒"],
        "b": ["▁", "▃", "▄", "▅", "▆", "▇", "█", "▇", "▆", "▅", "▄", "▃"],
        "d": ["⣾", "⣽", "⣻", "⢿", "⡿", "⣟", "⣯", "⣷"],
        "w": ["🌍", "🌎", "🌏"],
        "s": ["🌀   ", " 🌀  ", "  🌀 ", "   🌀", "  🌀 ", " 🌀  "],
        "+": ["+", "x"],
        "t": ["✶", "✸", "✹", "✺", "✹", "✷"]
    }

    def __init__(
        self,
        message: str = "Loading...",
        delay: float = 0.1,
        symbols=None,
        count_down: bool = False,
        time_in_s: float = 0
    ):
        """Set up the spinner; *symbols* may be a SYMBOL_SETS key or a frame list."""
        # A string selects a named symbol set; unknown keys fall through to None.
        if isinstance(symbols, str):
            symbols = self.SYMBOL_SETS.get(symbols, None)

        # Braille frames are the default animation.
        if symbols is None:
            symbols = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]

        # Deterministic ASCII frames while running under unittest.
        if 'unittest' in sys.argv[0]:
            symbols = ['#', '=', '-']

        self.spinner = itertools.cycle(symbols)
        self.delay = delay
        self.message = message
        self.running = False
        self.spinner_thread = None
        self.max_t = time_in_s
        self.contd = count_down

        # Rendering state (primary spinner owns the console line).
        self._is_primary = False
        self._start_time = 0

        # Shared manager coordinating all live spinners.
        self.manager = SpinnerManager()

    def _generate_render_line(self):
        """Compose the primary line: frame, message, elapsed/remaining seconds."""
        now = time.time()
        elapsed = now - self._start_time
        if self.contd:
            shown = max(0, self.max_t - elapsed)
        else:
            shown = elapsed
        frame = next(self.spinner)
        return f"{frame} {self.message} | {shown:.2f}"

    def _generate_secondary_info(self):
        """Render the short form used for non-primary spinners."""
        return f"{self.message}"

    def __enter__(self):
        """Begin timing and register with the shared manager."""
        self._start_time = time.time()
        self.running = True
        self.manager.register_spinner(self)
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Unregister; clear the console line if we were the primary spinner."""
        self.running = False
        self.manager.unregister_spinner(self)
        if not self._is_primary:
            return
        # Carriage return + erase-to-end-of-line.
        sys.stdout.write("\r\033[K")
        sys.stdout.flush()
__enter__()

Start the spinner.

Source code in toolboxv2/utils/extras/Style.py
644
645
646
647
648
649
def __enter__(self):
    """Start the spinner."""
    # Mark running, record the timestamp used by the render line, and
    # register with the shared SpinnerManager so this spinner gets drawn.
    self.running = True
    self._start_time = time.time()
    self.manager.register_spinner(self)
    return self
__exit__(exc_type, exc_value, exc_traceback)

Stop the spinner.

Source code in toolboxv2/utils/extras/Style.py
651
652
653
654
655
656
657
658
def __exit__(self, exc_type, exc_value, exc_traceback):
    """Stop the spinner."""
    self.running = False
    self.manager.unregister_spinner(self)
    # Clear the spinner's line if it was the primary spinner.
    # "\r\033[K" = carriage return + ANSI erase-to-end-of-line.
    if self._is_primary:
        sys.stdout.write("\r\033[K")
        sys.stdout.flush()
__init__(message='Loading...', delay=0.1, symbols=None, count_down=False, time_in_s=0)

Initialize spinner with flexible configuration.

Source code in toolboxv2/utils/extras/Style.py
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
def __init__(
    self,
    message: str = "Loading...",
    delay: float = 0.1,
    symbols=None,
    count_down: bool = False,
    time_in_s: float = 0
):
    """Initialize spinner with flexible configuration.

    Args:
        message: Text rendered next to the spinner frame.
        delay: Seconds between frame updates.
        symbols: SYMBOL_SETS key, an explicit list of frames, or None for defaults.
        count_down: If True, display time counting down from time_in_s.
        time_in_s: Total seconds for the countdown display.
    """
    # Resolve symbol set.
    if isinstance(symbols, str):
        symbols = self.SYMBOL_SETS.get(symbols, None)

    # Default symbols if not provided.
    if symbols is None:
        symbols = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]

    # Test mode symbol set (deterministic ASCII under unittest).
    if 'unittest' in sys.argv[0]:
        symbols = ['#', '=', '-']

    self.spinner = itertools.cycle(symbols)
    self.delay = delay
    self.message = message
    self.running = False
    self.spinner_thread = None
    self.max_t = time_in_s
    self.contd = count_down

    # Rendering management.
    self._is_primary = False
    self._start_time = 0

    # Central manager.
    self.manager = SpinnerManager()

TBEF

Automatically generated by ToolBox v = 0.1.22

daemon

DaemonUtil
Source code in toolboxv2/utils/daemon/daemon_util.py
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
class DaemonUtil:
    """Async-constructed daemon that exposes a class instance's methods over a socket.

    Instances are created with ``await DaemonUtil(...)``: ``__init__`` only
    stores the arguments, and the real setup runs in ``__ainit__`` via the
    ``__await__``/``__initobj`` pair.
    """

    def __init__(self, *args, **kwargs):
        """
        Standard constructor used for argument passing.
        Do not override. Use __ainit__ instead.
        """
        self.server = None
        self.alive = False
        # Held until the first await hands them to __ainit__.
        self.__storedargs = args, kwargs
        self.async_initialized = False

    async def __initobj(self):
        """Crutch used for __await__ after spawning"""
        # Guard against double initialisation.
        assert not self.async_initialized
        self.async_initialized = True
        # pass the parameters to __ainit__ that passed to __init__
        await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
        return self

    def __await__(self):
        # Enables `await DaemonUtil(...)` to drive the async initialisation.
        return self.__initobj().__await__()

    async def __ainit__(self, class_instance: Any, host='0.0.0.0', port=6587, t=False,
                        app: (App or AppType) | None = None,
                        peer=False, name='daemonApp-server', on_register=None, on_client_exit=None, on_server_exit=None,
                        unix_socket=False, test_override=False):
        # Async counterpart of __init__: wires callbacks, starts the socket
        # server, and launches the receive loop.
        from toolboxv2.mods.SocketManager import SocketType
        self.class_instance = class_instance
        self.server = None
        self.port = port
        self.host = host
        self.alive = False
        self.test_override = test_override
        self._name = name
        # Fall back to no-op callbacks when none are supplied.
        if on_register is None:
            def on_register(*args):
                return None
        self._on_register = on_register
        if on_client_exit is None:
            def on_client_exit(*args):
                return None
        self.on_client_exit = on_client_exit
        if on_server_exit is None:
            def on_server_exit():
                return None
        self.on_server_exit = on_server_exit
        self.unix_socket = unix_socket
        self.online = None
        connection_type = SocketType.server
        if peer:
            connection_type = SocketType.peer

        await self.start_server(connection_type)
        app = app if app is not None else get_app(from_=f"DaemonUtil.{self._name}")
        # NOTE(review): connect() is a coroutine function, so to_thread returns
        # its (unstarted) coroutine object; it is only awaited below when t is
        # True — confirm this is the intended lifecycle.
        self.online = await asyncio.to_thread(self.connect, app)
        if t:
            await self.online

    async def start_server(self, connection_type=None):
        """Start the server using app and the socket manager"""
        from toolboxv2.mods.SocketManager import SocketType
        if connection_type is None:
            connection_type = SocketType.server
        app = get_app(from_="Starting.Daemon")
        print(app.mod_online("SocketManager"), "SocketManager")
        # Lazily load SocketManager if it is not online yet.
        if not app.mod_online("SocketManager"):
            await app.load_mod("SocketManager")
        server_result = await app.a_run_any(SOCKETMANAGER.CREATE_SOCKET,
                                            get_results=True,
                                            name=self._name,
                                            host=self.host,
                                            port=self.port,
                                            type_id=connection_type,
                                            max_connections=-1,
                                            return_full_object=True,
                                            test_override=self.test_override,
                                            unix_file=self.unix_socket)
        # Both an error result and an empty result are fatal here.
        if server_result.is_error():
            raise Exception(f"Server error: {server_result.print(False)}")
        if not server_result.is_data():
            raise Exception(f"Server error: {server_result.print(False)}")
        self.alive = True
        self.server = server_result
        # 'socket': socket,
        # 'receiver_socket': r_socket,
        # 'host': host,
        # 'port': port,
        # 'p2p-port': endpoint_port,
        # 'sender': send,
        # 'receiver_queue': receiver_queue,
        # 'connection_error': connection_error,
        # 'receiver_thread': s_thread,
        # 'keepalive_thread': keep_alive_thread,
        # 'running_dict': running_dict,
        # 'client_to_receiver_thread': to_receive,
        # 'client_receiver_threads': threeds,

    async def send(self, data: dict or bytes or str, identifier: tuple[str, int] or str = "main"):
        # Push *data* to the connection selected by *identifier* via the
        # server's sender callable.
        result = await self.server.aget()
        sender = result.get('sender')
        await sender(data, identifier)
        return "Data Transmitted"

    @staticmethod
    async def runner_co(fuction, *args, **kwargs):
        # Invoke *fuction* uniformly whether it is sync or async.
        if asyncio.iscoroutinefunction(fuction):
            return await fuction(*args, **kwargs)
        return fuction(*args, **kwargs)

    async def connect(self, app):
        # Main receive loop: validates clients, then dispatches their requests
        # to methods of class_instance until self.alive goes False.
        result = await self.server.aget()
        if not isinstance(result, dict) or result.get('connection_error') != 0:
            raise Exception(f"Server error: {result}")
        self.server = Result.ok(result)
        receiver_queue: queue.Queue = self.server.get('receiver_queue')
        client_to_receiver_thread = self.server.get('client_to_receiver_thread')
        running_dict = self.server.get('running_dict')
        sender = self.server.get('sender')
        known_clients = {}
        valid_clients = {}
        app.print(f"Starting Demon {self._name}")

        while self.alive:

            if not receiver_queue.empty():
                data = receiver_queue.get()
                if not data:
                    continue
                if 'identifier' not in data:
                    continue

                identifier = data.get('identifier', 'unknown')
                try:
                    if identifier == "new_con":
                        # Fresh connection: remember the client under its
                        # stringified address and attach a receiver thread.
                        client, address = data.get('data')
                        get_logger().info(f"New connection: {address}")
                        known_clients[str(address)] = client
                        await client_to_receiver_thread(client, str(address))

                        await self.runner_co(self._on_register, identifier, address)
                        identifier = str(address)
                        # await sender({'ok': 0}, identifier)

                    print("Receiver queue", identifier, identifier in known_clients, identifier in valid_clients)
                    # validation
                    if identifier in known_clients:
                        get_logger().info(identifier)
                        # Loopback clients are trusted without a claim or key.
                        if identifier.startswith("('127.0.0.1'"):
                            valid_clients[identifier] = known_clients[identifier]
                            await self.runner_co(self._on_register, identifier, data)
                        elif data.get("claim", False):
                            do = app.run_any(("CloudM.UserInstances", "validate_ws_id"),
                                             ws_id=data.get("claim"))[0]
                            get_logger().info(do)
                            if do:
                                valid_clients[identifier] = known_clients[identifier]
                                await self.runner_co(self._on_register, identifier, data)
                        elif data.get("key", False) == os.getenv("TB_R_KEY"):
                            valid_clients[identifier] = known_clients[identifier]
                            await self.runner_co(self._on_register, identifier, data)
                        else:
                            get_logger().warning(f"Validating Failed: {identifier}")
                            # sender({'Validating Failed': -1}, eval(identifier))
                        get_logger().info(f"Validating New: {identifier}")
                        del known_clients[identifier]

                    elif identifier in valid_clients:
                        # Validated client: interpret the message as a method
                        # call on class_instance (with a few built-in commands).
                        get_logger().info(f"New valid Request: {identifier}")
                        name = data.get('name')
                        args = data.get('args')
                        kwargs = data.get('kwargs')

                        get_logger().info(f"Request data: {name=}{args=}{kwargs=}{identifier=}")

                        if name == 'exit_main':
                            self.alive = False
                            break

                        if name == 'show_console':
                            show_console(True)
                            await sender({'ok': 0}, identifier)
                            continue

                        if name == 'hide_console':
                            show_console(False)
                            await sender({'ok': 0}, identifier)
                            continue

                        if name == 'rrun_flow':
                            show_console(True)
                            runnner = self.class_instance.run_flow
                            threading.Thread(target=runnner, args=args, kwargs=kwargs, daemon=True).start()
                            await sender({'ok': 0}, identifier)
                            show_console(False)
                            continue

                        async def _helper_runner():
                            # Dispatch the named attribute of class_instance and
                            # send its (normalised) result back to the caller.
                            try:
                                attr_f = getattr(self.class_instance, name)

                                if asyncio.iscoroutinefunction(attr_f):
                                    res = await attr_f(*args, **kwargs)
                                else:
                                    res = attr_f(*args, **kwargs)

                                if res is None:
                                    res = {'data': res}
                                elif isinstance(res, Result):
                                    if asyncio.iscoroutine(res.get()) or isinstance(res.get(), asyncio.Task):
                                        res_ = await res.aget()
                                        res.result.data = res_
                                    res = json.loads(res.to_api_result().json())
                                elif isinstance(res, bytes | dict):
                                    pass
                                else:
                                    res = {'data': 'unsupported type', 'type': str(type(res))}

                                get_logger().info(f"sending response {res} {type(res)}")

                                await sender(res, identifier)
                            except Exception as e:
                                # Broad catch: errors are reported to the client
                                # instead of killing the daemon loop.
                                await sender({"data": str(e)}, identifier)

                        await _helper_runner()
                    else:
                        print("Unknown connection data:", data)

                except Exception as e:
                    get_logger().warning(Style.RED(f"An error occurred on {identifier} {str(e)}"))
                    if identifier != "unknown":
                        running_dict["receive"][str(identifier)] = False
                        await self.runner_co(self.on_client_exit,  identifier)
            await asyncio.sleep(0.1)
        # Loop ended: signal all receiver/keepalive threads to stop.
        running_dict["server_receiver"] = False
        for x in running_dict["receive"]:
            running_dict["receive"][x] = False
        running_dict["keep_alive_var"] = False
        await self.runner_co(self.on_server_exit)
        app.print(f"Closing Demon {self._name}")
        return Result.ok()

    async def a_exit(self):
        # Gracefully close the socket server and stop the receive loop.
        result = await self.server.aget()
        await result.get("close")()
        self.alive = False
        if asyncio.iscoroutine(self.online):
            await self.online
        print("Connection result :", result.get("host"), result.get("port"),
              "total connections:", result.get("connections"))
__init__(*args, **kwargs)

Standard constructor used for argument passing. Do not override; use __ainit__ instead.

Source code in toolboxv2/utils/daemon/daemon_util.py
19
20
21
22
23
24
25
26
27
def __init__(self, *args, **kwargs):
    """
    Standard constructor used for argument passing.
    Do not override. Use __ainit__ instead.
    """
    self.server = None
    self.alive = False
    # Held until the first await hands them to __ainit__.
    self.__storedargs = args, kwargs
    self.async_initialized = False
__initobj() async

Crutch used for __await__ after spawning.

Source code in toolboxv2/utils/daemon/daemon_util.py
29
30
31
32
33
34
35
async def __initobj(self):
    """Crutch used for __await__ after spawning"""
    # Guard against double initialisation, then forward the arguments
    # captured by __init__ into the real async constructor.
    assert not self.async_initialized
    self.async_initialized = True
    # pass the parameters to __ainit__ that passed to __init__
    await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
    return self
start_server(connection_type=None) async

Start the server using app and the socket manager

Source code in toolboxv2/utils/daemon/daemon_util.py
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
async def start_server(self, connection_type=None):
    """Start the server using app and the socket manager"""
    from toolboxv2.mods.SocketManager import SocketType
    if connection_type is None:
        connection_type = SocketType.server
    app = get_app(from_="Starting.Daemon")
    print(app.mod_online("SocketManager"), "SocketManager")
    # Lazily load SocketManager if it is not online yet.
    if not app.mod_online("SocketManager"):
        await app.load_mod("SocketManager")
    server_result = await app.a_run_any(SOCKETMANAGER.CREATE_SOCKET,
                                        get_results=True,
                                        name=self._name,
                                        host=self.host,
                                        port=self.port,
                                        type_id=connection_type,
                                        max_connections=-1,
                                        return_full_object=True,
                                        test_override=self.test_override,
                                        unix_file=self.unix_socket)
    # Both an error result and an empty result are fatal here.
    if server_result.is_error():
        raise Exception(f"Server error: {server_result.print(False)}")
    if not server_result.is_data():
        raise Exception(f"Server error: {server_result.print(False)}")
    self.alive = True
    self.server = server_result
daemon_util
DaemonUtil
Source code in toolboxv2/utils/daemon/daemon_util.py
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
class DaemonUtil:
    """Awaitable daemon that exposes a wrapped class instance over a socket.

    Construction is two-phase: ``DaemonUtil(...)`` only stores the arguments;
    awaiting the instance runs ``__ainit__``, which opens the server socket
    (via the SocketManager module) and starts the receive loop in ``connect``.
    """

    def __init__(self, *args, **kwargs):
        """
        Standard synchronous constructor; only stores the arguments.
        Do not override. Use __ainit__ instead.
        """
        self.server = None
        self.alive = False
        # Kept untouched until the instance is awaited (see __initobj).
        self.__storedargs = args, kwargs
        self.async_initialized = False

    async def __initobj(self):
        """Crutch used for __await__ after spawning"""
        # Guard against awaiting the same instance twice.
        assert not self.async_initialized
        self.async_initialized = True
        # pass the parameters to __ainit__ that passed to __init__
        await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
        return self

    def __await__(self):
        # Makes `await DaemonUtil(...)` run the async initialiser exactly once.
        return self.__initobj().__await__()

    async def __ainit__(self, class_instance: Any, host='0.0.0.0', port=6587, t=False,
                        app: (App or AppType) | None = None,
                        peer=False, name='daemonApp-server', on_register=None, on_client_exit=None, on_server_exit=None,
                        unix_socket=False, test_override=False):
        # Async part of construction: set up state, open the server socket and
        # start the receive loop. `t=True` blocks until that loop finishes.
        from toolboxv2.mods.SocketManager import SocketType
        self.class_instance = class_instance
        self.server = None
        self.port = port
        self.host = host
        self.alive = False
        self.test_override = test_override
        self._name = name
        # Default the three lifecycle callbacks to no-ops.
        if on_register is None:
            def on_register(*args):
                return None
        self._on_register = on_register
        if on_client_exit is None:
            def on_client_exit(*args):
                return None
        self.on_client_exit = on_client_exit
        if on_server_exit is None:
            def on_server_exit():
                return None
        self.on_server_exit = on_server_exit
        self.unix_socket = unix_socket
        self.online = None
        connection_type = SocketType.server
        if peer:
            connection_type = SocketType.peer

        await self.start_server(connection_type)
        app = app if app is not None else get_app(from_=f"DaemonUtil.{self._name}")
        # NOTE(review): connect is a coroutine function, so to_thread only
        # *creates* the coroutine in the worker thread; it is actually awaited
        # below (when t is True) or later in a_exit — confirm this is intended.
        self.online = await asyncio.to_thread(self.connect, app)
        if t:
            await self.online

    async def start_server(self, connection_type=None):
        """Start the server using app and the socket manager"""
        from toolboxv2.mods.SocketManager import SocketType
        if connection_type is None:
            connection_type = SocketType.server
        app = get_app(from_="Starting.Daemon")
        print(app.mod_online("SocketManager"), "SocketManager")
        # Lazily load the SocketManager module on first use.
        if not app.mod_online("SocketManager"):
            await app.load_mod("SocketManager")
        server_result = await app.a_run_any(SOCKETMANAGER.CREATE_SOCKET,
                                            get_results=True,
                                            name=self._name,
                                            host=self.host,
                                            port=self.port,
                                            type_id=connection_type,
                                            max_connections=-1,
                                            return_full_object=True,
                                            test_override=self.test_override,
                                            unix_file=self.unix_socket)
        if server_result.is_error():
            raise Exception(f"Server error: {server_result.print(False)}")
        if not server_result.is_data():
            raise Exception(f"Server error: {server_result.print(False)}")
        self.alive = True
        self.server = server_result
        # The stored result later resolves (via .aget()) to a dict with keys:
        # 'socket': socket,
        # 'receiver_socket': r_socket,
        # 'host': host,
        # 'port': port,
        # 'p2p-port': endpoint_port,
        # 'sender': send,
        # 'receiver_queue': receiver_queue,
        # 'connection_error': connection_error,
        # 'receiver_thread': s_thread,
        # 'keepalive_thread': keep_alive_thread,
        # 'running_dict': running_dict,
        # 'client_to_receiver_thread': to_receive,
        # 'client_receiver_threads': threeds,

    async def send(self, data: dict or bytes or str, identifier: tuple[str, int] or str = "main"):
        """Send *data* to the client addressed by *identifier*."""
        result = await self.server.aget()
        sender = result.get('sender')
        await sender(data, identifier)
        return "Data Transmitted"

    @staticmethod
    async def runner_co(fuction, *args, **kwargs):
        """Call *fuction* (sic), awaiting it when it is a coroutine function."""
        if asyncio.iscoroutinefunction(fuction):
            return await fuction(*args, **kwargs)
        return fuction(*args, **kwargs)

    async def connect(self, app):
        """Main receive loop: accept clients, validate them, dispatch requests.

        Runs until ``self.alive`` is cleared or an ``exit_main`` request
        arrives; returns ``Result.ok()`` on clean shutdown.
        """
        result = await self.server.aget()
        if not isinstance(result, dict) or result.get('connection_error') != 0:
            raise Exception(f"Server error: {result}")
        self.server = Result.ok(result)
        receiver_queue: queue.Queue = self.server.get('receiver_queue')
        client_to_receiver_thread = self.server.get('client_to_receiver_thread')
        running_dict = self.server.get('running_dict')
        sender = self.server.get('sender')
        known_clients = {}  # connected but not yet validated, keyed by str(address)
        valid_clients = {}  # validated clients allowed to issue requests
        app.print(f"Starting Demon {self._name}")

        while self.alive:

            if not receiver_queue.empty():
                data = receiver_queue.get()
                if not data:
                    continue
                if 'identifier' not in data:
                    continue

                identifier = data.get('identifier', 'unknown')
                try:
                    if identifier == "new_con":
                        # Fresh connection: remember it and attach a receiver thread.
                        client, address = data.get('data')
                        get_logger().info(f"New connection: {address}")
                        known_clients[str(address)] = client
                        await client_to_receiver_thread(client, str(address))

                        await self.runner_co(self._on_register, identifier, address)
                        identifier = str(address)
                        # await sender({'ok': 0}, identifier)

                    print("Receiver queue", identifier, identifier in known_clients, identifier in valid_clients)
                    # validation
                    if identifier in known_clients:
                        get_logger().info(identifier)
                        # Loopback connections are trusted implicitly.
                        if identifier.startswith("('127.0.0.1'"):
                            valid_clients[identifier] = known_clients[identifier]
                            await self.runner_co(self._on_register, identifier, data)
                        elif data.get("claim", False):
                            # Validate a claimed websocket id via CloudM.UserInstances.
                            do = app.run_any(("CloudM.UserInstances", "validate_ws_id"),
                                             ws_id=data.get("claim"))[0]
                            get_logger().info(do)
                            if do:
                                valid_clients[identifier] = known_clients[identifier]
                                await self.runner_co(self._on_register, identifier, data)
                        elif data.get("key", False) == os.getenv("TB_R_KEY"):
                            # Shared-secret validation via the TB_R_KEY env var.
                            valid_clients[identifier] = known_clients[identifier]
                            await self.runner_co(self._on_register, identifier, data)
                        else:
                            get_logger().warning(f"Validating Failed: {identifier}")
                            # sender({'Validating Failed': -1}, eval(identifier))
                        get_logger().info(f"Validating New: {identifier}")
                        # Each pending client gets exactly one validation attempt.
                        del known_clients[identifier]

                    elif identifier in valid_clients:
                        get_logger().info(f"New valid Request: {identifier}")
                        name = data.get('name')
                        args = data.get('args')
                        kwargs = data.get('kwargs')

                        get_logger().info(f"Request data: {name=}{args=}{kwargs=}{identifier=}")

                        # Built-in control commands handled before dispatching
                        # to the wrapped class instance.
                        if name == 'exit_main':
                            self.alive = False
                            break

                        if name == 'show_console':
                            show_console(True)
                            await sender({'ok': 0}, identifier)
                            continue

                        if name == 'hide_console':
                            show_console(False)
                            await sender({'ok': 0}, identifier)
                            continue

                        if name == 'rrun_flow':
                            show_console(True)
                            runnner = self.class_instance.run_flow
                            threading.Thread(target=runnner, args=args, kwargs=kwargs, daemon=True).start()
                            await sender({'ok': 0}, identifier)
                            show_console(False)
                            continue

                        async def _helper_runner():
                            # Dispatch `name` on the wrapped instance and send
                            # the (JSON-serialisable) result back to the caller.
                            try:
                                attr_f = getattr(self.class_instance, name)

                                if asyncio.iscoroutinefunction(attr_f):
                                    res = await attr_f(*args, **kwargs)
                                else:
                                    res = attr_f(*args, **kwargs)

                                if res is None:
                                    res = {'data': res}
                                elif isinstance(res, Result):
                                    # Resolve an async payload inside the Result first.
                                    if asyncio.iscoroutine(res.get()) or isinstance(res.get(), asyncio.Task):
                                        res_ = await res.aget()
                                        res.result.data = res_
                                    res = json.loads(res.to_api_result().json())
                                elif isinstance(res, bytes | dict):
                                    pass
                                else:
                                    res = {'data': 'unsupported type', 'type': str(type(res))}

                                get_logger().info(f"sending response {res} {type(res)}")

                                await sender(res, identifier)
                            except Exception as e:
                                # Report the failure to the caller instead of
                                # crashing the receive loop.
                                await sender({"data": str(e)}, identifier)

                        await _helper_runner()
                    else:
                        print("Unknown connection data:", data)

                except Exception as e:
                    get_logger().warning(Style.RED(f"An error occurred on {identifier} {str(e)}"))
                    if identifier != "unknown":
                        # Stop this client's receiver thread and run the exit hook.
                        running_dict["receive"][str(identifier)] = False
                        await self.runner_co(self.on_client_exit,  identifier)
            await asyncio.sleep(0.1)
        # Shutdown: stop all receiver threads and the keepalive loop.
        running_dict["server_receiver"] = False
        for x in running_dict["receive"]:
            running_dict["receive"][x] = False
        running_dict["keep_alive_var"] = False
        await self.runner_co(self.on_server_exit)
        app.print(f"Closing Demon {self._name}")
        return Result.ok()

    async def a_exit(self):
        """Close the server socket, stop the loop and await its completion."""
        result = await self.server.aget()
        await result.get("close")()
        self.alive = False
        # self.online may still hold the un-awaited connect() coroutine.
        if asyncio.iscoroutine(self.online):
            await self.online
        print("Connection result :", result.get("host"), result.get("port"),
              "total connections:", result.get("connections"))
__init__(*args, **kwargs)

Standard constructor used for argument passing. Do not override; use __ainit__ instead.

Source code in toolboxv2/utils/daemon/daemon_util.py
19
20
21
22
23
24
25
26
27
def __init__(self, *args, **kwargs):
    """
    Standard synchronous constructor; only stores the arguments.
    Do not override. Use __ainit__ instead.
    """
    self.server = None
    self.alive = False
    # Store (args, kwargs) untouched; __initobj forwards them to __ainit__
    # when the instance is awaited.
    self.__storedargs = args, kwargs
    self.async_initialized = False
__initobj() async

Crutch used for __await__ after spawning

Source code in toolboxv2/utils/daemon/daemon_util.py
29
30
31
32
33
34
35
async def __initobj(self):
    """Crutch used for __await__ after spawning: runs one-time async init."""
    # Guard against awaiting the same instance twice.
    assert not self.async_initialized
    self.async_initialized = True
    # Forward the parameters that were passed to __init__ on to __ainit__.
    await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
    return self
start_server(connection_type=None) async

Start the server using app and the socket manager

Source code in toolboxv2/utils/daemon/daemon_util.py
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
async def start_server(self, connection_type=None):
    """Start the server using app and the socket manager.

    Opens the daemon's listening socket via the SocketManager module and
    stores the result object on ``self.server``.

    Args:
        connection_type: SocketType to open; defaults to SocketType.server.

    Raises:
        Exception: if the SocketManager result is an error or carries no data.
    """
    from toolboxv2.mods.SocketManager import SocketType
    if connection_type is None:
        connection_type = SocketType.server
    app = get_app(from_="Starting.Daemon")
    print(app.mod_online("SocketManager"), "SocketManager")
    # Lazily load the SocketManager module on first use.
    if not app.mod_online("SocketManager"):
        await app.load_mod("SocketManager")
    server_result = await app.a_run_any(SOCKETMANAGER.CREATE_SOCKET,
                                        get_results=True,
                                        name=self._name,
                                        host=self.host,
                                        port=self.port,
                                        type_id=connection_type,
                                        max_connections=-1,
                                        return_full_object=True,
                                        test_override=self.test_override,
                                        unix_file=self.unix_socket)
    if server_result.is_error():
        raise Exception(f"Server error: {server_result.print(False)}")
    if not server_result.is_data():
        raise Exception(f"Server error: {server_result.print(False)}")
    self.alive = True
    # NOTE(review): the Result object itself is stored; consumers resolve it
    # later with .aget() (see send()/connect() in the full class).
    self.server = server_result

extras

BaseWidget
Source code in toolboxv2/utils/extras/base_widget.py
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
class BaseWidget:
    """Base helper for server-rendered HTML widgets.

    Handles API endpoint registration, HTML asset generation through the
    MINIMALHTML module, reload hooks and iframe embedding.
    """

    def __init__(self, name: str):
        self.name = name          # widget/module name, also the asset group name
        self.openWidgetsIDs = {}  # open widget instances by id
        self.onReload = []        # callbacks run on every reload(request)
        self.iframes = {}         # iframe_id -> iframe config dict

    def register(self, app, fuction, version=None, name="get_widget", level=1, **kwargs):
        """Expose *fuction* (sic; kept for backward compatibility) as an API
        endpoint of this widget's module on *app*."""
        if version is None:
            version = app.version
        app.tb(mod_name=self.name, version=version, request_as_kwarg=True, level=level, api=True, name=name, **kwargs)(
            fuction)

    def modify_iterator(self, iterator, replace):
        """
        ['a', 'b'] -> [{replace[0]: 'a',..., replace[len(replace)-1]: 'a'},
        {replace[0]: 'b',..., replace[len(replace)-1]: 'b'}, ]

        Under the key "name" the widget's own name is prepended to the item.
        """
        for item in iterator:
            modified_item = {replace[i]: (self.name if replace[i] == "name" else '') + item for i in
                             range(len(replace))}
            yield modified_item

    def register2reload(self, *functions):
        """Queue *functions* to be called as ``f(request=request)`` on reload.

        BUG FIX: the wrapper now binds each function as a default argument;
        previously the closure captured the loop variable late, so every
        registered hook ended up calling the *last* function.
        """
        for fuction in functions:
            def x(r, _f=fuction):
                return _f(request=r)
            self.onReload.append(x)

    def reload_guard(self, function):
        """Call *function* only when no reload hooks are registered.

        NOTE(review): the guard looks inverted (runs only when onReload is
        empty) — behavior kept as-is; confirm against callers.
        """
        c = None
        if len(self.onReload) == 0:
            c = function()
        return c

    async def oa_reload_guard(self, function):
        """Async variant of reload_guard; awaits coroutine functions."""
        c = None
        if len(self.onReload) == 0:
            c = await function() if asyncio.iscoroutinefunction(function) else function()
        return c

    @staticmethod
    def get_a_group(asset_name, template=None, file_path=None, a_kwargs=None):
        """Build a one-element asset group, file-based when *file_path* is
        given, template-based otherwise.

        Raises:
            ValueError: if a_kwargs is None.
        """
        if a_kwargs is None:
            raise ValueError("a_kwargs must be specified")
        return [{'name': asset_name,
                 'file_path': file_path,
                 'kwargs': a_kwargs
                 } if file_path is not None else {'name': asset_name,
                                                  'template': template,
                                                  'kwargs': a_kwargs
                                                  }]

    def group_generator(self, asset_name: str, iterator: iter, template=None, file_path=None, a_kwargs=None):
        """One asset group per item of *iterator*; dict items are merged on
        top of *a_kwargs* for that and subsequent groups."""
        groups = []
        work_kwargs = a_kwargs
        for _i, data in enumerate(iterator):
            if isinstance(data, dict):
                work_kwargs = {**a_kwargs, **data}
            groups.append(self.get_a_group(asset_name, template=template, file_path=file_path, a_kwargs=work_kwargs))
        return groups

    def asset_loder(self, app, name, asset_id, file_path=None, template=None, iterator=None, **kwargs):
        """Create (or re-create) an asset collection for this widget.
        (sic: "loder" kept for backward compatibility.)"""
        a_kwargs = {**{
            'root': f"/api/{self.name}",
            'WidgetID': asset_id},
                    **kwargs}
        asset_name = f"{name}-{asset_id}"
        if iterator is None:
            group = self.get_a_group(asset_name,
                                     template=template,
                                     file_path=file_path,
                                     a_kwargs=a_kwargs)
        else:
            group = self.group_generator(asset_name,
                                         iterator=iterator,
                                         template=template,
                                         file_path=file_path,
                                         a_kwargs=a_kwargs)

        asset = app.run_any(MINIMALHTML.ADD_COLLECTION_TO_GROUP,
                            group_name=self.name,
                            collection={'name': f"{asset_name}",
                                        'group': group},
                            get_results=True)
        if asset.is_error():
            # Group did not exist yet: create it, then retry the collection add.
            app.run_any(MINIMALHTML.ADD_GROUP, command=self.name)
            asset = app.run_any(MINIMALHTML.ADD_COLLECTION_TO_GROUP,
                                group_name=self.name,
                                collection={'name': f"{self.name}-{asset_name}",
                                            'group': group},
                                get_results=True)
        return asset

    def generate_html(self, app, name="MainWidget", asset_id: str = None):
        """Render the collection ``f"{name}-{asset_id}"``.

        BUG FIX: the old default ``asset_id=str(uuid.uuid4())[:4]`` was
        evaluated once at class-definition time, so every defaulted call
        shared the same "random" id. A fresh id is now drawn per call.
        """
        if asset_id is None:
            asset_id = str(uuid.uuid4())[:4]
        return app.run_any(MINIMALHTML.GENERATE_HTML,
                           group_name=self.name,
                           collection_name=f"{name}-{asset_id}")

    def load_widget(self, app, request, name="MainWidget", asset_id: str = None):
        """Run reload hooks and return the rendered HTML for one widget asset."""
        if asset_id is None:
            # See generate_html: avoid the shared definition-time default id.
            asset_id = str(uuid.uuid4())[:4]
        app.run_any(MINIMALHTML.ADD_GROUP, command=self.name)
        self.reload(request)
        html_widget = self.generate_html(app, name, asset_id)
        return html_widget[0]['html_element']

    @staticmethod
    async def get_user_from_request(app, request):
        """Resolve the current user; anonymous ``User()`` when request is None."""
        from toolboxv2.mods.CloudM import User
        if request is None:
            return User()
        return await get_current_user_from_request(app, request)

    @staticmethod
    def get_s_id(request):
        """Session 'ID' from the request, wrapped in a Result; internal error
        Result when no request is given."""
        from ..system.types import Result
        if request is None:
            return Result.default_internal_error("No request specified")
        return Result.ok(request.session.get('ID', ''))

    def reload(self, request):
        """Run every registered reload hook synchronously."""
        [_(request) for _ in self.onReload]

    async def oa_reload(self, request):
        """Run every registered reload hook, awaiting coroutine hooks."""
        [_(request) if not asyncio.iscoroutinefunction(_) else await _(request) for _ in self.onReload]

    async def get_widget(self, request, **kwargs):
        """Subclasses must implement: return the widget's HTML for *request*."""
        raise NotImplementedError

    def hash_wrapper(self, _id, _salt=''):
        """One-way hash of *_id*, peppered with the widget name."""
        from ..security.cryp import Code
        return Code.one_way_hash(text=_id, salt=_salt, pepper=self.name)

    def register_iframe(self, iframe_id: str, src: str, width: str = "100%", height: str = "500px", **kwargs):
        """
        Register an iframe with the given id and source.

        Args:
            iframe_id: Unique id for the iframe
            src: URL or path of the iframe source
            width: iframe width (default: "100%")
            height: iframe height (default: "500px")
            **kwargs: Additional iframe attributes
        """
        iframe_config = {
            'src': src,
            'width': width,
            'height': height,
            **kwargs
        }
        self.iframes[iframe_id] = iframe_config

    def create_iframe_asset(self, app, iframe_id: str, asset_id: str = None):
        """
        Create an asset for a registered iframe.

        Args:
            app: App instance
            iframe_id: id of a previously registered iframe
            asset_id: optional specific asset id (fresh 4-char id when omitted)

        Raises:
            ValueError: if no iframe was registered under *iframe_id*.
        """
        if iframe_id not in self.iframes:
            raise ValueError(f"iframe mit ID {iframe_id} nicht registriert")

        if asset_id is None:
            asset_id = str(uuid.uuid4())[:4]

        iframe_config = self.iframes[iframe_id]
        iframe_template = """
        <iframe id="{iframe_id}"
                src="{src}"
                width="{width}"
                height="{height}"
                frameborder="0"
                {additional_attrs}></iframe>
        """.strip()

        # Split off the attributes the template renders explicitly and
        # serialise the rest as extra HTML attributes.
        known_attrs = {'src', 'width', 'height'}
        additional_attrs = ' '.join(
            f'{k}="{v}"' for k, v in iframe_config.items()
            if k not in known_attrs
        )

        iframe_html = iframe_template.format(
            iframe_id=iframe_id,
            src=iframe_config['src'],
            width=iframe_config['width'],
            height=iframe_config['height'],
            additional_attrs=additional_attrs
        )

        return self.asset_loder(
            app=app,
            name=f"iframe-{iframe_id}",
            asset_id=asset_id,
            template=iframe_html
        )

    def load_iframe(self, app, iframe_id: str, asset_id: str = None):
        """
        Create the asset for a registered iframe and return its HTML element.

        BUG FIX: when *asset_id* was omitted, create_iframe_asset generated a
        fresh id internally while generate_html received None, so the lookup
        used a "...-None" collection name. The id is now resolved once here
        and shared by both calls.

        Args:
            app: App instance
            iframe_id: id of a previously registered iframe
            asset_id: optional specific asset id
        """
        if asset_id is None:
            asset_id = str(uuid.uuid4())[:4]
        self.create_iframe_asset(app, iframe_id, asset_id)
        return self.generate_html(app, f"iframe-{iframe_id}", asset_id)[0]['html_element']
create_iframe_asset(app, iframe_id, asset_id=None)

Erstellt ein Asset für einen registrierten iframe

Parameters:

Name Type Description Default
app

App-Instanz

required
iframe_id str

ID des registrierten iframes

required
asset_id str

Optional, spezifische Asset-ID

None
Source code in toolboxv2/utils/extras/base_widget.py
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
def create_iframe_asset(self, app, iframe_id: str, asset_id: str = None):
    """
    Create an asset for a registered iframe.

    Args:
        app: App instance
        iframe_id: id of a previously registered iframe
        asset_id: optional specific asset id (fresh 4-char id when omitted)

    Raises:
        ValueError: if no iframe was registered under *iframe_id*.
    """
    if iframe_id not in self.iframes:
        raise ValueError(f"iframe mit ID {iframe_id} nicht registriert")

    if asset_id is None:
        asset_id = str(uuid.uuid4())[:4]

    iframe_config = self.iframes[iframe_id]
    iframe_template = """
    <iframe id="{iframe_id}"
            src="{src}"
            width="{width}"
            height="{height}"
            frameborder="0"
            {additional_attrs}></iframe>
    """.strip()

    # Split off the attributes the template renders explicitly and serialise
    # the remaining config entries as extra HTML attributes.
    known_attrs = {'src', 'width', 'height'}
    additional_attrs = ' '.join(
        f'{k}="{v}"' for k, v in iframe_config.items()
        if k not in known_attrs
    )

    iframe_html = iframe_template.format(
        iframe_id=iframe_id,
        src=iframe_config['src'],
        width=iframe_config['width'],
        height=iframe_config['height'],
        additional_attrs=additional_attrs
    )

    return self.asset_loder(
        app=app,
        name=f"iframe-{iframe_id}",
        asset_id=asset_id,
        template=iframe_html
    )
load_iframe(app, iframe_id, asset_id=None)

Lädt einen registrierten iframe und gibt das HTML-Element zurück

Parameters:

Name Type Description Default
app

App-Instanz

required
iframe_id str

ID des registrierten iframes

required
asset_id str

Optional, spezifische Asset-ID

None
Source code in toolboxv2/utils/extras/base_widget.py
280
281
282
283
284
285
286
287
288
289
290
def load_iframe(self, app, iframe_id: str, asset_id: str = None):
    """
    Create the asset for a registered iframe and return its HTML element.

    Args:
        app: App instance
        iframe_id: id of a previously registered iframe
        asset_id: optional specific asset id

    NOTE(review): when asset_id is None, create_iframe_asset draws its own
    fresh id internally while generate_html receives None — confirm the two
    stay in sync for defaulted calls.
    """
    self.create_iframe_asset(app, iframe_id, asset_id)
    return self.generate_html(app, f"iframe-{iframe_id}", asset_id)[0]['html_element']
modify_iterator(iterator, replace)

['a', 'b'] -> [{replace[0]: 'a',..., replace[len(replace)-1]: 'a'}, {replace[0]: 'b',..., replace[len(replace)-1]: 'b'}, ]

Source code in toolboxv2/utils/extras/base_widget.py
 94
 95
 96
 97
 98
 99
100
101
102
103
def modify_iterator(self, iterator, replace):
    """Yield one dict per item, fanning the item out under every key in *replace*.

    The value for each key is the item itself, except under the key
    ``"name"``, where the widget's own name is prepended:
    ``['a']`` with ``['name', 'id']`` yields ``[{'name': self.name + 'a', 'id': 'a'}]``.
    """
    for element in iterator:
        fanned_out = {}
        for key in replace:
            prefix = self.name if key == "name" else ''
            fanned_out[key] = prefix + element
        yield fanned_out
register_iframe(iframe_id, src, width='100%', height='500px', **kwargs)

Registriert einen iframe mit gegebener ID und Quelle

Parameters:

Name Type Description Default
iframe_id str

Eindeutige ID für den iframe

required
src str

URL oder Pfad zur Quelle des iframes

required
width str

Breite des iframes (default: "100%")

'100%'
height str

Höhe des iframes (default: "500px")

'500px'
**kwargs

Weitere iframe-Attribute

{}
Source code in toolboxv2/utils/extras/base_widget.py
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
def register_iframe(self, iframe_id: str, src: str, width: str = "100%", height: str = "500px", **kwargs):
    """
    Register an iframe with the given id and source.

    Args:
        iframe_id: Unique id for the iframe
        src: URL or path of the iframe source
        width: iframe width (default: "100%")
        height: iframe height (default: "500px")
        **kwargs: Additional iframe attributes
    """
    iframe_config = {
        'src': src,
        'width': width,
        'height': height,
        **kwargs
    }
    self.iframes[iframe_id] = iframe_config
Style
Spinner

Enhanced Spinner with tqdm-like line rendering.

Source code in toolboxv2/utils/extras/Style.py
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
class Spinner:
    """
    Enhanced Spinner with tqdm-like line rendering.
    """
    # Named symbol cycles, selectable by single-letter key via `symbols="c"` etc.
    SYMBOL_SETS = {
        "c": ["◐", "◓", "◑", "◒"],
        "b": ["▁", "▃", "▄", "▅", "▆", "▇", "█", "▇", "▆", "▅", "▄", "▃"],
        "d": ["⣾", "⣽", "⣻", "⢿", "⡿", "⣟", "⣯", "⣷"],
        "w": ["🌍", "🌎", "🌏"],
        "s": ["🌀   ", " 🌀  ", "  🌀 ", "   🌀", "  🌀 ", " 🌀  "],
        "+": ["+", "x"],
        "t": ["✶", "✸", "✹", "✺", "✹", "✷"]
    }

    def __init__(
        self,
        message: str = "Loading...",
        delay: float = 0.1,
        symbols=None,
        count_down: bool = False,
        time_in_s: float = 0
    ):
        """Initialize spinner with flexible configuration."""
        # Resolve symbol set: a string selects a named set from SYMBOL_SETS.
        if isinstance(symbols, str):
            symbols = self.SYMBOL_SETS.get(symbols, None)

        # Default symbols if not provided.
        if symbols is None:
            symbols = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]

        # Test mode symbol set.
        # (presumably ASCII-only so captured unittest output stays plain — TODO confirm)
        if 'unittest' in sys.argv[0]:
            symbols = ['#', '=', '-']

        self.spinner = itertools.cycle(symbols)
        self.delay = delay
        self.message = message
        self.running = False
        self.spinner_thread = None
        self.max_t = time_in_s
        self.contd = count_down

        # Rendering management.
        self._is_primary = False
        self._start_time = 0

        # Central manager.
        self.manager = SpinnerManager()

    def _generate_render_line(self):
        """Generate the primary render line."""
        current_time = time.time()
        if self.contd:
            # Count remaining seconds down toward zero from max_t.
            remaining = max(0, self.max_t - (current_time - self._start_time))
            time_display = f"{remaining:.2f}"
        else:
            # Count elapsed seconds up.
            time_display = f"{current_time - self._start_time:.2f}"

        symbol = next(self.spinner)
        return f"{symbol} {self.message} | {time_display}"

    def _generate_secondary_info(self):
        """Generate secondary spinner info for additional spinners."""
        return f"{self.message}"

    def __enter__(self):
        """Start the spinner: record start time, register with the manager."""
        self.running = True
        self._start_time = time.time()
        self.manager.register_spinner(self)
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Stop the spinner."""
        self.running = False
        self.manager.unregister_spinner(self)
        # Clear the spinner's line if it was the primary spinner.
        if self._is_primary:
            sys.stdout.write("\r\033[K")
            sys.stdout.flush()
__enter__()

Start the spinner.

Source code in toolboxv2/utils/extras/Style.py
644
645
646
647
648
649
def __enter__(self):
    """Start the spinner: record the start time and register with the manager."""
    self.running = True
    self._start_time = time.time()
    self.manager.register_spinner(self)
    return self
__exit__(exc_type, exc_value, exc_traceback)

Stop the spinner.

Source code in toolboxv2/utils/extras/Style.py
651
652
653
654
655
656
657
658
def __exit__(self, exc_type, exc_value, exc_traceback):
    """Stop the spinner and unregister it from the manager."""
    self.running = False
    self.manager.unregister_spinner(self)
    # Clear the spinner's line if it was the primary spinner.
    # ("\r\033[K" = carriage return + ANSI erase-to-end-of-line.)
    if self._is_primary:
        sys.stdout.write("\r\033[K")
        sys.stdout.flush()
__init__(message='Loading...', delay=0.1, symbols=None, count_down=False, time_in_s=0)

Initialize spinner with flexible configuration.

Source code in toolboxv2/utils/extras/Style.py
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
def __init__(
    self,
    message: str = "Loading...",
    delay: float = 0.1,
    symbols=None,
    count_down: bool = False,
    time_in_s: float = 0
):
    """Initialize spinner with flexible configuration.

    Args:
        message: Text shown next to the animated symbol.
        delay: Seconds between animation frames.
        symbols: Sequence of frame strings, or the name of a predefined set
            (looked up in SYMBOL_SETS — presumably a class-level dict; an
            unknown name falls through to the default frames).
        count_down: If True, display remaining time instead of elapsed time.
        time_in_s: Time budget in seconds; the countdown starting point.
    """
    # Resolve symbol set.
    if isinstance(symbols, str):
        symbols = self.SYMBOL_SETS.get(symbols, None)

    # Default symbols if not provided.
    if symbols is None:
        symbols = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]

    # Test mode symbol set.
    # Plain ASCII frames when running under a unittest entry point, so test
    # output is not polluted with Braille characters.
    if 'unittest' in sys.argv[0]:
        symbols = ['#', '=', '-']

    self.spinner = itertools.cycle(symbols)  # Endless frame iterator.
    self.delay = delay
    self.message = message
    self.running = False
    self.spinner_thread = None
    self.max_t = time_in_s
    self.contd = count_down

    # Rendering management.
    self._is_primary = False   # Set by the manager for the first registered spinner.
    self._start_time = 0       # Stamped in __enter__.

    # Central manager.
    # SpinnerManager is a singleton, so all spinners share one render thread.
    self.manager = SpinnerManager()
SpinnerManager

Manages multiple spinners to ensure tqdm-like line rendering. Automatically captures SIGINT (Ctrl+C) to stop all spinners.

Source code in toolboxv2/utils/extras/Style.py
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
class SpinnerManager(metaclass=Singleton):
    """
    Manages multiple spinners to ensure tqdm-like line rendering.
    Automatically captures SIGINT (Ctrl+C) to stop all spinners.
    """
    # Cached singleton instance used by __new__.
    _instance = None

    def __new__(cls):
        # NOTE(review): the Singleton metaclass presumably already enforces a
        # single instance; this __new__ guard duplicates that — confirm intent.
        if not cls._instance:
            cls._instance = super().__new__(cls)
            cls._instance._init_manager()
        return cls._instance

    def _init_manager(self):
        """Initialize spinner management resources and register SIGINT handler."""
        self._spinners = []                 # Currently active spinners, in registration order.
        self._lock = threading.Lock()       # Guards _spinners.
        self._render_thread = None
        self._should_run = False
        try:
            signal.signal(signal.SIGINT, self._signal_handler)
        except ValueError:
            # signal.signal raises ValueError when not called from the main
            # thread; fall back to running without a Ctrl+C handler.
            print("Spinner Manager not in the min Thread no signal possible")
            pass

    def _signal_handler(self, signum, frame):
        """Handle SIGINT by stopping all spinners gracefully."""
        with self._lock:
            for spinner in self._spinners:
                spinner.running = False
            self._spinners.clear()
        self._should_run = False
        sys.stdout.write("\r\033[K")  # Clear the spinner's line.
        sys.stdout.flush()
        # Terminate the process, mirroring default Ctrl+C behavior.
        sys.exit(0)

    def register_spinner(self, spinner):
        """Register a new spinner."""
        with self._lock:
            # First spinner defines the rendering line.
            if not self._spinners:
                spinner._is_primary = True
            self._spinners.append(spinner)
            # Start rendering if not already running.
            # Daemon thread so it never blocks interpreter shutdown.
            if not self._should_run:
                self._should_run = True
                self._render_thread = threading.Thread(
                    target=self._render_loop,
                    daemon=True
                )
                self._render_thread.start()

    def unregister_spinner(self, spinner):
        """Unregister a completed spinner."""
        with self._lock:
            if spinner in self._spinners:
                self._spinners.remove(spinner)

    def _render_loop(self):
        """Continuous rendering loop for all active spinners."""
        while self._should_run:
            # NOTE(review): this emptiness check reads _spinners without the
            # lock — benign under CPython list semantics, but worth confirming.
            if not self._spinners:
                self._should_run = False
                break

            with self._lock:
                # Find primary spinner (first registered).
                primary_spinner = next((s for s in self._spinners if s._is_primary), None)

                if primary_spinner and primary_spinner.running:
                    # Render in the same line.
                    render_line = primary_spinner._generate_render_line()

                    # Append additional spinner info if multiple exist.
                    if len(self._spinners) > 1:
                        secondary_info = " | ".join(
                            s._generate_secondary_info()
                            for s in self._spinners
                            if s is not primary_spinner and s.running
                        )
                        render_line += f" [{secondary_info}]"

                    # Clear line and write.
                    # Any stdout failure (e.g. closed stream) ends rendering.
                    try:
                        sys.stdout.write("\r" + render_line + "\033[K")
                        sys.stdout.flush()
                    except Exception:
                        self._should_run = False

            time.sleep(0.1)  # Render interval.
register_spinner(spinner)

Register a new spinner.

Source code in toolboxv2/utils/extras/Style.py
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
def register_spinner(self, spinner):
    """Register a new spinner.

    The first spinner to register becomes the primary one and owns the
    terminal line; the render thread is started lazily on first demand.
    """
    with self._lock:
        # First spinner defines the rendering line.
        if not self._spinners:
            spinner._is_primary = True
        self._spinners.append(spinner)
        # Start rendering if not already running.
        if not self._should_run:
            self._should_run = True
            self._render_thread = threading.Thread(
                target=self._render_loop,
                daemon=True
            )
            self._render_thread.start()
unregister_spinner(spinner)

Unregister a completed spinner.

Source code in toolboxv2/utils/extras/Style.py
539
540
541
542
543
def unregister_spinner(self, spinner):
    """Unregister a completed spinner.

    Safe to call for a spinner that was never (or already un-) registered.
    """
    with self._lock:
        if spinner in self._spinners:
            self._spinners.remove(spinner)
base_widget
BaseWidget
Source code in toolboxv2/utils/extras/base_widget.py
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
class BaseWidget:
    """Base class for server-rendered UI widgets.

    Tracks reload callbacks, open widget IDs and registered iframes, and
    provides helpers to register API endpoints on the app and to build and
    render HTML asset groups via the MINIMALHTML module.
    """

    def __init__(self, name: str):
        self.name = name
        # Widget-instance bookkeeping, populated by subclasses.
        self.openWidgetsIDs = {}
        # Callbacks invoked by reload()/oa_reload(); see register2reload().
        self.onReload = []
        # iframe_id -> attribute dict; see register_iframe().
        self.iframes = {}

    def register(self, app, fuction, version=None, name="get_widget", level=1, **kwargs):
        """Register *fuction* as an API endpoint of this widget's module on *app*.

        Defaults to the app's own version when *version* is None.
        (Parameter name `fuction` is kept for backward compatibility.)
        """
        if version is None:
            version = app.version
        app.tb(mod_name=self.name, version=version, request_as_kwarg=True, level=level, api=True, name=name, **kwargs)(
            fuction)

    def modify_iterator(self, iterator, replace):
        """
        ['a', 'b'] -> [{replace[0]: 'a',..., replace[len(replace)-1]: 'a'},
        {replace[0]: 'b',..., replace[len(replace)-1]: 'b'}, ]

        Keys equal to "name" get the widget name prepended to the item value.
        """
        for item in iterator:
            modified_item = {replace[i]: (self.name if replace[i] == "name" else '') + item for i in
                             range(len(replace))}
            yield modified_item

    def register2reload(self, *functions):
        """Queue *functions* to be called as ``fn(request=...)`` on each reload.

        Each callback is bound eagerly via a default argument; a plain closure
        over the loop variable would late-bind and make every wrapper call the
        last function only.
        """
        for fuction in functions:
            def x(r, _fn=fuction):
                return _fn(request=r)
            self.onReload.append(x)

    def reload_guard(self, function):
        """Run *function* only when no reload callbacks are registered; return its result (else None)."""
        c = None
        if len(self.onReload) == 0:
            c = function()
        return c

    async def oa_reload_guard(self, function):
        """Async variant of reload_guard; awaits *function* when it is a coroutine function."""
        c = None
        if len(self.onReload) == 0:
            c = await function() if asyncio.iscoroutinefunction(function) else function()
        return c

    @staticmethod
    def get_a_group(asset_name, template=None, file_path=None, a_kwargs=None):
        """Build a single-entry asset group from either a file path or an inline template.

        Raises:
            ValueError: if *a_kwargs* is not supplied.
        """
        if a_kwargs is None:
            raise ValueError("a_kwargs must be specified")
        return [{'name': asset_name,
                 'file_path': file_path,
                 'kwargs': a_kwargs
                 } if file_path is not None else {'name': asset_name,
                                                  'template': template,
                                                  'kwargs': a_kwargs
                                                  }]

    def group_generator(self, asset_name: str, iterator: iter, template=None, file_path=None, a_kwargs=None):
        """Build one asset group per item of *iterator*; dict items are merged into *a_kwargs*."""
        groups = []
        work_kwargs = a_kwargs
        for _i, data in enumerate(iterator):
            if isinstance(data, dict):
                work_kwargs = {**a_kwargs, **data}
            groups.append(self.get_a_group(asset_name, template=template, file_path=file_path, a_kwargs=work_kwargs))
        return groups

    def asset_loder(self, app, name, asset_id, file_path=None, template=None, iterator=None, **kwargs):
        """Register an asset collection "{name}-{asset_id}" in this widget's MINIMALHTML group.

        Creates the group first and retries once if the initial add fails.
        (Method name `asset_loder` is kept for backward compatibility.)
        """
        a_kwargs = {**{
            'root': f"/api/{self.name}",
            'WidgetID': asset_id},
                    **kwargs}
        asset_name = f"{name}-{asset_id}"
        if iterator is None:
            group = self.get_a_group(asset_name,
                                     template=template,
                                     file_path=file_path,
                                     a_kwargs=a_kwargs)
        else:
            group = self.group_generator(asset_name,
                                         iterator=iterator,
                                         template=template,
                                         file_path=file_path,
                                         a_kwargs=a_kwargs)

        asset = app.run_any(MINIMALHTML.ADD_COLLECTION_TO_GROUP,
                            group_name=self.name,
                            collection={'name': f"{asset_name}",
                                        'group': group},
                            get_results=True)
        if asset.is_error():
            app.run_any(MINIMALHTML.ADD_GROUP, command=self.name)
            asset = app.run_any(MINIMALHTML.ADD_COLLECTION_TO_GROUP,
                                group_name=self.name,
                                collection={'name': f"{self.name}-{asset_name}",
                                            'group': group},
                                get_results=True)
        return asset

    def generate_html(self, app, name="MainWidget", asset_id=None):
        """Render the collection "{name}-{asset_id}" of this widget's group.

        A fresh 4-char id is generated per call when *asset_id* is omitted
        (previously the default was evaluated once at definition time, so all
        defaulted calls shared a single id).
        """
        if asset_id is None:
            asset_id = str(uuid.uuid4())[:4]
        return app.run_any(MINIMALHTML.GENERATE_HTML,
                           group_name=self.name,
                           collection_name=f"{name}-{asset_id}")

    def load_widget(self, app, request, name="MainWidget", asset_id=None):
        """Ensure the group exists, run reload callbacks, and return the rendered HTML element."""
        if asset_id is None:
            asset_id = str(uuid.uuid4())[:4]
        app.run_any(MINIMALHTML.ADD_GROUP, command=self.name)
        self.reload(request)
        html_widget = self.generate_html(app, name, asset_id)
        return html_widget[0]['html_element']

    @staticmethod
    async def get_user_from_request(app, request):
        """Resolve the current user from *request*; an anonymous User when request is None."""
        from toolboxv2.mods.CloudM import User
        if request is None:
            return User()
        return await get_current_user_from_request(app, request)

    @staticmethod
    def get_s_id(request):
        """Return the session 'ID' wrapped in a Result; an error Result when request is None."""
        from ..system.types import Result
        if request is None:
            return Result.default_internal_error("No request specified")
        return Result.ok(request.session.get('ID', ''))

    def reload(self, request):
        """Invoke every registered reload callback with *request*."""
        [_(request) for _ in self.onReload]

    async def oa_reload(self, request):
        """Async reload: awaits coroutine callbacks, calls plain ones directly."""
        [_(request) if not asyncio.iscoroutinefunction(_) else await _(request) for _ in self.onReload]

    async def get_widget(self, request, **kwargs):
        """Produce the widget's HTML; must be implemented by subclasses."""
        raise NotImplementedError

    def hash_wrapper(self, _id, _salt=''):
        """One-way hash of *_id*, peppered with the widget name."""
        from ..security.cryp import Code
        return Code.one_way_hash(text=_id, salt=_salt, pepper=self.name)

    def register_iframe(self, iframe_id: str, src: str, width: str = "100%", height: str = "500px", **kwargs):
        """Register an iframe under *iframe_id*.

        Args:
            iframe_id: Unique id for the iframe.
            src: URL or path of the iframe source.
            width: iframe width (default: "100%").
            height: iframe height (default: "500px").
            **kwargs: Additional iframe attributes.
        """
        iframe_config = {
            'src': src,
            'width': width,
            'height': height,
            **kwargs
        }
        self.iframes[iframe_id] = iframe_config

    def create_iframe_asset(self, app, iframe_id: str, asset_id: str = None):
        """Create an asset for a registered iframe.

        Args:
            app: App instance.
            iframe_id: id of the registered iframe.
            asset_id: Optional explicit asset id (random 4-char id otherwise).

        Raises:
            ValueError: if *iframe_id* was never registered.
        """
        if iframe_id not in self.iframes:
            raise ValueError(f"iframe mit ID {iframe_id} nicht registriert")

        if asset_id is None:
            asset_id = str(uuid.uuid4())[:4]

        iframe_config = self.iframes[iframe_id]
        iframe_template = """
        <iframe id="{iframe_id}"
                src="{src}"
                width="{width}"
                height="{height}"
                frameborder="0"
                {additional_attrs}></iframe>
        """.strip()

        # Split known attributes from extras and build the extra-attribute string.
        known_attrs = {'src', 'width', 'height'}
        additional_attrs = ' '.join(
            f'{k}="{v}"' for k, v in iframe_config.items()
            if k not in known_attrs
        )

        iframe_html = iframe_template.format(
            iframe_id=iframe_id,
            src=iframe_config['src'],
            width=iframe_config['width'],
            height=iframe_config['height'],
            additional_attrs=additional_attrs
        )

        return self.asset_loder(
            app=app,
            name=f"iframe-{iframe_id}",
            asset_id=asset_id,
            template=iframe_html
        )

    def load_iframe(self, app, iframe_id: str, asset_id: str = None):
        """Load a registered iframe and return its rendered HTML element.

        Args:
            app: App instance.
            iframe_id: id of the registered iframe.
            asset_id: Optional explicit asset id.
        """
        self.create_iframe_asset(app, iframe_id, asset_id)
        return self.generate_html(app, f"iframe-{iframe_id}", asset_id)[0]['html_element']
create_iframe_asset(app, iframe_id, asset_id=None)

Erstellt ein Asset für einen registrierten iframe

Parameters:

Name Type Description Default
app

App-Instanz

required
iframe_id str

ID des registrierten iframes

required
asset_id str

Optional, spezifische Asset-ID

None
Source code in toolboxv2/utils/extras/base_widget.py
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
def create_iframe_asset(self, app, iframe_id: str, asset_id: str = None):
    """Create an asset for a registered iframe.

    Args:
        app: App instance.
        iframe_id: id of the registered iframe.
        asset_id: Optional explicit asset id (random 4-char id otherwise).

    Raises:
        ValueError: if *iframe_id* was never registered.
    """
    if iframe_id not in self.iframes:
        raise ValueError(f"iframe mit ID {iframe_id} nicht registriert")

    if asset_id is None:
        asset_id = str(uuid.uuid4())[:4]

    iframe_config = self.iframes[iframe_id]
    iframe_template = """
    <iframe id="{iframe_id}"
            src="{src}"
            width="{width}"
            height="{height}"
            frameborder="0"
            {additional_attrs}></iframe>
    """.strip()

    # Split known attributes from extras and build the extra-attribute string.
    known_attrs = {'src', 'width', 'height'}
    additional_attrs = ' '.join(
        f'{k}="{v}"' for k, v in iframe_config.items()
        if k not in known_attrs
    )

    iframe_html = iframe_template.format(
        iframe_id=iframe_id,
        src=iframe_config['src'],
        width=iframe_config['width'],
        height=iframe_config['height'],
        additional_attrs=additional_attrs
    )

    return self.asset_loder(
        app=app,
        name=f"iframe-{iframe_id}",
        asset_id=asset_id,
        template=iframe_html
    )
load_iframe(app, iframe_id, asset_id=None)

Lädt einen registrierten iframe und gibt das HTML-Element zurück

Parameters:

Name Type Description Default
app

App-Instanz

required
iframe_id str

ID des registrierten iframes

required
asset_id str

Optional, spezifische Asset-ID

None
Source code in toolboxv2/utils/extras/base_widget.py
280
281
282
283
284
285
286
287
288
289
290
def load_iframe(self, app, iframe_id: str, asset_id: str = None):
    """Load a registered iframe and return its rendered HTML element.

    Args:
        app: App instance.
        iframe_id: id of the registered iframe.
        asset_id: Optional explicit asset id.
    """
    self.create_iframe_asset(app, iframe_id, asset_id)
    return self.generate_html(app, f"iframe-{iframe_id}", asset_id)[0]['html_element']
modify_iterator(iterator, replace)

['a', 'b'] -> [{replace[0]: 'a',..., replace[len(replace)-1]: 'a'}, {replace[0]: 'b',..., replace[len(replace)-1]: 'b'}, ]

Source code in toolboxv2/utils/extras/base_widget.py
 94
 95
 96
 97
 98
 99
100
101
102
103
def modify_iterator(self, iterator, replace):
    """
    ['a', 'b'] -> [{replace[0]: 'a',..., replace[len(replace)-1]: 'a'},
    {replace[0]: 'b',..., replace[len(replace)-1]: 'b'}, ]

    Generator: yields one dict per item. Keys equal to "name" get the
    widget name prepended to the item value; other keys map to the item as-is.
    """

    for item in iterator:
        modified_item = {replace[i]: (self.name if replace[i] == "name" else '') + item for i in
                         range(len(replace))}
        yield modified_item
register_iframe(iframe_id, src, width='100%', height='500px', **kwargs)

Registriert einen iframe mit gegebener ID und Quelle

Parameters:

Name Type Description Default
iframe_id str

Eindeutige ID für den iframe

required
src str

URL oder Pfad zur Quelle des iframes

required
width str

Breite des iframes (default: "100%")

'100%'
height str

Höhe des iframes (default: "500px")

'500px'
**kwargs

Weitere iframe-Attribute

{}
Source code in toolboxv2/utils/extras/base_widget.py
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
def register_iframe(self, iframe_id: str, src: str, width: str = "100%", height: str = "500px", **kwargs):
    """
    Registriert einen iframe mit gegebener ID und Quelle

    Args:
        iframe_id: Eindeutige ID für den iframe
        src: URL oder Pfad zur Quelle des iframes
        width: Breite des iframes (default: "100%")
        height: Höhe des iframes (default: "500px")
        **kwargs: Weitere iframe-Attribute
    """
    iframe_config = {
        'src': src,
        'width': width,
        'height': height,
        **kwargs
    }
    self.iframes[iframe_id] = iframe_config
blobs
BlobFile
Source code in toolboxv2/utils/extras/blobs.py
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
class BlobFile(io.IOBase):
    """File-like view onto one path stored inside a pickled blob.

    A blob (identified by the first path component) holds a pickled dict that
    maps a folder key ('|'-joined intermediate path parts) and file names to
    raw bytes. Intended for use as a context manager: __enter__ loads the
    buffer in 'r' mode, __exit__ flushes it back in 'w' mode. Optional
    symmetric encryption of file contents via *key*.
    """
    def __init__(self, filename: str, mode: str = 'r', storage: BlobStorage = None, key: str = None,
                 servers: list[str] = None):
        if not isinstance(filename, str) or not filename:
            raise ValueError("Filename must be a non-empty string.")
        # The prepended '/' is immediately stripped again below — net effect
        # is normalization of any leading '/' or '\' separators.
        if not filename.startswith('/'): filename = '/' + filename
        self.filename = filename.lstrip('/\\')
        self.blob_id, self.folder, self.datei = self._path_splitter(self.filename)
        self.mode = mode

        if storage is None:
            # In a real app, dependency injection or a global factory would be better
            # but this provides a fallback for simple scripts.
            if not servers:
                from toolboxv2 import get_app
                storage = get_app(from_="BlobStorage").root_blob_storage
            else:
                storage = BlobStorage(servers=servers)

        self.storage = storage
        self.data_buffer = b""
        self.key = key
        # Validate the key up front with an encrypt/decrypt round-trip so a
        # bad key fails here rather than after data has been written.
        if key:
            try:
                assert Code.decrypt_symmetric(Code.encrypt_symmetric(b"test", key), key, to_str=False) == b"test"
            except Exception:
                raise ValueError("Invalid symmetric key provided.")

    @staticmethod
    def _path_splitter(filename):
        """Split 'blob_id/folder/.../file' into (blob_id, '|'-joined folder, file)."""
        parts = Path(filename).parts
        if not parts: raise ValueError("Filename cannot be empty.")
        blob_id = parts[0]
        if len(parts) == 1: raise ValueError("Filename must include a path within the blob, e.g., 'blob_id/file.txt'")
        datei = parts[-1]
        folder = '|'.join(parts[1:-1])
        return blob_id, folder, datei

    def create(self):
        """Create the backing blob as an empty pickled dict; returns self for chaining."""
        self.storage.create_blob(pickle.dumps({}), self.blob_id)
        return self

    def __enter__(self):
        # Load the blob's directory dict; in 'r' mode also populate the buffer.
        try:
            raw_blob_data = self.storage.read_blob(self.blob_id)
            # Guard against read_blob returning a falsy non-b'' value (e.g.
            # None); normalizing to b"" makes pickle.loads raise EOFError,
            # which is handled below as an empty blob.
            if raw_blob_data != b'' and (not raw_blob_data or raw_blob_data is None):
                raw_blob_data = b""
            blob_content = pickle.loads(raw_blob_data)
        except (requests.exceptions.HTTPError, EOFError, pickle.UnpicklingError, ConnectionError) as e:
            if isinstance(e, requests.exceptions.HTTPError) and e.response.status_code == 404:
                blob_content = {}  # Blob doesn't exist yet, treat as empty
            elif isinstance(e, EOFError | pickle.UnpicklingError):
                blob_content = {}  # Blob is empty or corrupt, treat as empty for writing
            else:
                self.storage.create_blob(blob_id=self.blob_id, data=pickle.dumps({}))
                blob_content = {}

        if 'r' in self.mode:
            # NOTE(review): path_key is computed but never used — dead local.
            path_key = self.folder if self.folder else self.datei
            if self.folder:
                file_data = blob_content.get(self.folder, {}).get(self.datei)
            else:
                file_data = blob_content.get(self.datei)

            if file_data:
                self.data_buffer = file_data
                # Decrypt eagerly so read() always returns plaintext.
                if self.key:
                    self.data_buffer = Code.decrypt_symmetric(self.data_buffer, self.key, to_str=False)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # In 'w' mode, merge the buffer back into the blob's directory dict
        # and persist it. Read modes flush nothing.
        if 'w' in self.mode:
            final_data = self.data_buffer
            if self.key:
                final_data = Code.encrypt_symmetric(final_data, self.key)

            # Re-read so writes to other files in the same blob are preserved.
            try:
                raw_blob_data = self.storage.read_blob(self.blob_id)
                blob_content = pickle.loads(raw_blob_data)
            except Exception:
                blob_content = {}

            # Safely navigate and create path
            current_level = blob_content
            if self.folder:
                if self.folder not in current_level:
                    current_level[self.folder] = {}
                current_level = current_level[self.folder]

            current_level[self.datei] = final_data
            self.storage.update_blob(self.blob_id, pickle.dumps(blob_content))




    def exists(self) -> bool:
        """
        Checks if the specific file path exists within the blob without reading its content.
        This is an efficient, read-only operation.

        Returns:
            bool: True if the file exists within the blob, False otherwise.
        """
        try:
            # Fetch the raw blob data. This leverages the local cache if available.
            raw_blob_data = self.storage.read_blob(self.blob_id)
            # Unpickle the directory structure.
            if raw_blob_data:
                blob_content = pickle.loads(raw_blob_data)
            else:
                return False
        except (requests.exceptions.HTTPError, EOFError, pickle.UnpicklingError, ConnectionError):
            # If the blob itself doesn't exist, is empty, or can't be reached,
            # then the file within it cannot exist.
            return False

        # Navigate the dictionary to check for the file's existence.
        current_level = blob_content
        if self.folder:
            if self.folder not in current_level:
                return False
            current_level = current_level[self.folder]

        return self.datei in current_level

    def clear(self):
        """Reset the in-memory buffer (does not touch the stored blob)."""
        self.data_buffer = b''

    def write(self, data):
        """Append str (encoded) or bytes to the buffer; requires 'w' mode."""
        if 'w' not in self.mode: raise OSError("File not opened in write mode.")
        if isinstance(data, str):
            self.data_buffer += data.encode()
        elif isinstance(data, bytes):
            self.data_buffer += data
        else:
            raise TypeError("write() argument must be str or bytes")

    def read(self):
        """Return the raw (decrypted) buffer bytes; requires 'r' mode."""
        if 'r' not in self.mode: raise OSError("File not opened in read mode.")
        return self.data_buffer

    def read_json(self):
        """Decode the buffer as JSON; {} when the buffer is empty."""
        if 'r' not in self.mode: raise ValueError("File not opened in read mode.")
        if self.data_buffer == b"": return {}
        return json.loads(self.data_buffer.decode())

    def write_json(self, data):
        """Append *data* serialized as JSON.

        NOTE(review): appends to the buffer — calling this more than once per
        session produces concatenated documents that read_json cannot parse.
        """
        if 'w' not in self.mode: raise ValueError("File not opened in write mode.")
        self.data_buffer += json.dumps(data).encode()

    def read_pickle(self):
        """Unpickle the buffer; {} when empty. Only use on trusted blobs —
        pickle can execute arbitrary code on malicious input."""
        if 'r' not in self.mode: raise ValueError("File not opened in read mode.")
        if self.data_buffer == b"": return {}
        return pickle.loads(self.data_buffer)

    def write_pickle(self, data):
        """Append *data* serialized with pickle."""
        if 'w' not in self.mode: raise ValueError("File not opened in write mode.")
        self.data_buffer += pickle.dumps(data)

    def read_yaml(self):
        """Parse the buffer as YAML (safe_load); {} when empty."""
        if 'r' not in self.mode: raise ValueError("File not opened in read mode.")
        if self.data_buffer == b"": return {}
        return yaml.safe_load(self.data_buffer)

    def write_yaml(self, data):
        """Serialize *data* as YAML into the buffer (yaml.dump streams via self.write)."""
        if 'w' not in self.mode: raise ValueError("File not opened in write mode.")
        yaml.dump(data, self)
exists()

Checks if the specific file path exists within the blob without reading its content. This is an efficient, read-only operation.

Returns:

Name Type Description
bool bool

True if the file exists within the blob, False otherwise.

Source code in toolboxv2/utils/extras/blobs.py
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
def exists(self) -> bool:
    """
    Checks if the specific file path exists within the blob without reading its content.
    This is an efficient, read-only operation.

    Returns:
        bool: True if the file exists within the blob, False otherwise.
    """
    try:
        # Fetch the raw blob data. This leverages the local cache if available.
        raw_blob_data = self.storage.read_blob(self.blob_id)
        # Unpickle the directory structure.
        if raw_blob_data:
            blob_content = pickle.loads(raw_blob_data)
        else:
            # Empty payload: the blob holds nothing, so the file cannot exist.
            return False
    except (requests.exceptions.HTTPError, EOFError, pickle.UnpicklingError, ConnectionError):
        # If the blob itself doesn't exist, is empty, or can't be reached,
        # then the file within it cannot exist.
        return False

    # Navigate the dictionary to check for the file's existence.
    current_level = blob_content
    if self.folder:
        if self.folder not in current_level:
            return False
        current_level = current_level[self.folder]

    return self.datei in current_level
BlobStorage

A production-ready client for the distributed blob storage server. It handles communication with a list of server instances, manages a local cache, and implements backoff/retry logic for resilience.

Source code in toolboxv2/utils/extras/blobs.py
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
class BlobStorage:
    """
    A production-ready client for the distributed blob storage server.
    It handles communication with a list of server instances, manages a local cache,
    and implements backoff/retry logic for resilience.
    """

    def __init__(self, servers: list[str], storage_directory: str = './.data/blob_cache'):


        self.servers = servers
        self.session = requests.Session()
        self.storage_directory = storage_directory
        self.blob_ids = []
        os.makedirs(storage_directory, exist_ok=True)

        # Initialize the consistent hash ring
        self.hash_ring = ConsistentHashRing()
        for server in self.servers:
            self.hash_ring.add_node(server)

    def _make_request(self, method, endpoint, blob_id: str = None, max_retries=2, **kwargs):
        """
        Makes a resilient HTTP request to the server cluster.
        - If a blob_id is provided, it uses the consistent hash ring to find the
          primary server and subsequent backup servers in a predictable order.
        - If no blob_id is given (e.g., for broadcast actions), it tries servers randomly.
        - Implements exponential backoff on server errors.
        """
        if not self.servers:
            res = requests.Response()
            res.status_code = 503
            res.reason = "No servers available"
            return res

        if blob_id:
            # Get the ordered list of servers for this specific blob
            preferred_servers = self.hash_ring.get_nodes_for_key(blob_id)
        else:
            # For non-specific requests, shuffle all servers
            preferred_servers = random.sample(self.servers, len(self.servers))

        last_error = None
        for attempt in range(max_retries):
            for server in preferred_servers:
                url = f"{server.rstrip('/')}{endpoint}"
                try:
                    # In a targeted request, print which server we are trying
                    response = self.session.request(method, url, timeout=10, **kwargs)

                    if 500 <= response.status_code < 600:
                        get_logger().warning(f"Warning: Server {server} returned status {response.status_code}. Retrying...")
                        continue
                    response.raise_for_status()
                    return response
                except requests.exceptions.RequestException as e:
                    last_error = e
                    get_logger().warning(f"Warning: Could not connect to server {server}: {e}. Trying next server.")

            if attempt < max_retries - 1:
                wait_time = 2 ** (attempt*0.1)
                get_logger().warning(f"Warning: All preferred servers failed. Retrying in {wait_time} seconds...")
                time.sleep(wait_time)
                if len(preferred_servers) == 1 and len(self.servers) > 1:
                    preferred_servers = random.sample(self.servers, len(self.servers))

        raise ConnectionError(f"Failed to execute request after {max_retries} attempts. Last error: {last_error}")


    def create_blob(self, data: bytes, blob_id=None) -> str:
        """
        Creates a new blob. The blob_id is calculated client-side by hashing
        the content, and the data is sent to the correct server determined
        by the consistent hash ring. This uses a PUT request, making creation
        idempotent.
        """
        # The blob ID is the hash of its content, ensuring content-addressable storage.
        if not blob_id:
            blob_id = hashlib.sha256(data).hexdigest()

        # Use PUT, as we now know the blob's final ID/URL.
        # Pass blob_id to _make_request so it uses the hash ring.
        print(f"Creating blob {blob_id} on {self._make_request('PUT', f'/blob/{blob_id}',blob_id=blob_id, data=data).status_code}")
        # blob_id = response.text
        self._save_blob_to_cache(blob_id, data)
        return blob_id

    def read_blob(self, blob_id: str) -> bytes:
        cached_data = self._load_blob_from_cache(blob_id)
        if cached_data is not None:
            return cached_data

        get_logger().info(f"Info: Blob '{blob_id}' not in cache, fetching from network.")
        # Pass blob_id to _make_request to target the correct server(s).
        response = self._make_request('GET', f'/blob/{blob_id}', blob_id=blob_id)

        blob_data = response.content
        self._save_blob_to_cache(blob_id, blob_data)
        return blob_data

    def update_blob(self, blob_id: str, data: bytes):
        # Pass blob_id to _make_request to target the correct server(s).
        response = self._make_request('PUT', f'/blob/{blob_id}', blob_id=blob_id, data=data)
        self._save_blob_to_cache(blob_id, data)
        return response

    def delete_blob(self, blob_id: str):
        # Pass blob_id to _make_request to target the correct server(s).
        self._make_request('DELETE', f'/blob/{blob_id}', blob_id=blob_id)
        cache_file = self._get_blob_cache_filename(blob_id)
        if os.path.exists(cache_file):
            os.remove(cache_file)

    # NOTE: share_blobs and recover_blob are coordination endpoints. They do not
    # act on a single blob, so they will continue to use the non-targeted (random)
    # request mode to contact any available server to act as a coordinator.
    def share_blobs(self, blob_ids: list[str]):
        get_logger().info(f"Info: Instructing a server to share blobs for recovery: {blob_ids}")
        payload = {"blob_ids": blob_ids}
        # No blob_id passed, will try any server as a coordinator.
        self._make_request('POST', '/share', json=payload)
        get_logger().info("Info: Sharing command sent successfully.")

    def recover_blob(self, lost_blob_id: str) -> bytes:
        get_logger().info(f"Info: Attempting to recover '{lost_blob_id}' from the cluster.")
        payload = {"blob_id": lost_blob_id}
        # No blob_id passed, recovery can be initiated by any server.
        response = self._make_request('POST', '/recover', json=payload)

        recovered_data = response.content
        get_logger().info(f"Info: Successfully recovered blob '{lost_blob_id}'.")
        self._save_blob_to_cache(lost_blob_id, recovered_data)
        return recovered_data

    def _get_blob_cache_filename(self, blob_id: str) -> str:
        return os.path.join(self.storage_directory, blob_id + '.blobcache')

    def _save_blob_to_cache(self, blob_id: str, data: bytes):
        if not data or data is None:
            return
        if blob_id not in self.blob_ids:
            self.blob_ids.append(blob_id)
        with open(self._get_blob_cache_filename(blob_id), 'wb') as f:
            f.write(data)

    def _load_blob_from_cache(self, blob_id: str) -> bytes | None:
        cache_file = self._get_blob_cache_filename(blob_id)
        if not os.path.exists(cache_file):
            return None
        with open(cache_file, 'rb') as f:
            return f.read()

    def exit(self):
        if len(self.blob_ids) < 5:
            return
        for _i in range(len(self.servers)//2+1):
            self.share_blobs(self.blob_ids)
create_blob(data, blob_id=None)

Creates a new blob. The blob_id is calculated client-side by hashing the content, and the data is sent to the correct server determined by the consistent hash ring. This uses a PUT request, making creation idempotent.

Source code in toolboxv2/utils/extras/blobs.py
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
def create_blob(self, data: bytes, blob_id=None) -> str:
    """
    Creates a new blob. The blob_id is calculated client-side by hashing
    the content, and the data is sent to the correct server determined
    by the consistent hash ring. This uses a PUT request, making creation
    idempotent.

    :param data: Raw blob payload.
    :param blob_id: Optional explicit ID; defaults to the SHA-256 hex digest of data.
    :return: The blob's ID.
    """
    # The blob ID is the hash of its content, ensuring content-addressable storage.
    if not blob_id:
        blob_id = hashlib.sha256(data).hexdigest()

    # Use PUT, as we now know the blob's final ID/URL.
    # Pass blob_id to _make_request so it uses the hash ring.
    # NOTE(review): debug print; consider routing through get_logger() instead.
    print(f"Creating blob {blob_id} on {self._make_request('PUT', f'/blob/{blob_id}',blob_id=blob_id, data=data).status_code}")
    # blob_id = response.text
    self._save_blob_to_cache(blob_id, data)
    return blob_id
ConsistentHashRing

A consistent hash ring implementation to map keys (blob_ids) to nodes (servers). It uses virtual nodes (replicas) to ensure a more uniform distribution of keys.

Source code in toolboxv2/utils/extras/blobs.py
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
class ConsistentHashRing:
    """
    A consistent hash ring implementation to map keys (blob_ids) to nodes (servers).
    It uses virtual nodes (replicas) to ensure a more uniform distribution of keys.
    """
    def __init__(self, replicas: int = 100):
        """
        :param replicas: The number of virtual nodes for each physical node.
                         Higher values lead to more balanced distribution.
        """
        self.replicas = replicas
        self._keys = []  # Sorted list of hash values (the ring)
        self._nodes = {} # Maps hash values to physical node URLs

    def _hash(self, key: str) -> int:
        """Hashes a key to an integer using md5 for speed and distribution."""
        return int(hashlib.md5(key.encode('utf-8')).hexdigest(), 16)

    def add_node(self, node: str):
        """Adds a physical node (and its virtual replicas) to the hash ring."""
        for i in range(self.replicas):
            vnode_key = f"{node}:{i}"
            h = self._hash(vnode_key)
            bisect.insort(self._keys, h)
            self._nodes[h] = node

    def get_nodes_for_key(self, key: str) -> list[str]:
        """
        Returns an ordered list of nodes responsible for the given key.
        The first node in the list is the primary, the rest are failover candidates
        in preferential order.
        """
        if not self._nodes:
            return []

        h = self._hash(key)
        start_idx = bisect.bisect_left(self._keys, h)

        # Fixed: hoist the distinct-node count out of the loop; it was
        # recomputed on every iteration, making the walk O(n^2).
        total_unique = len(set(self._nodes.values()))

        # Collect unique physical nodes by walking clockwise around the ring.
        found_nodes = []
        for i in range(len(self._keys)):
            idx = (start_idx + i) % len(self._keys)
            physical_node = self._nodes[self._keys[idx]]
            if physical_node not in found_nodes:
                found_nodes.append(physical_node)
            # Stop when we have found all unique physical nodes.
            if len(found_nodes) == total_unique:
                break
        return found_nodes
__init__(replicas=100)

:param replicas: The number of virtual nodes for each physical node. Higher values lead to more balanced distribution.

Source code in toolboxv2/utils/extras/blobs.py
25
26
27
28
29
30
31
32
def __init__(self, replicas: int = 100):
    """
    :param replicas: The number of virtual nodes for each physical node.
                     Higher values lead to more balanced distribution.
    """
    # Number of virtual nodes created per physical node in add_node().
    self.replicas = replicas
    self._keys = []  # Sorted list of hash values (the ring)
    self._nodes = {} # Maps hash values to physical node URLs
add_node(node)

Adds a physical node to the hash ring.

Source code in toolboxv2/utils/extras/blobs.py
38
39
40
41
42
43
44
def add_node(self, node: str):
    """Adds a physical node to the hash ring.

    Each node is inserted `self.replicas` times under distinct virtual-node
    keys ("node:i") so keys distribute more evenly around the ring.
    """
    for i in range(self.replicas):
        vnode_key = f"{node}:{i}"
        h = self._hash(vnode_key)
        # Keep the ring sorted so lookups can binary-search it.
        bisect.insort(self._keys, h)
        self._nodes[h] = node
get_nodes_for_key(key)

Returns an ordered list of nodes responsible for the given key. The first node in the list is the primary, the rest are failover candidates in preferential order.

Source code in toolboxv2/utils/extras/blobs.py
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
def get_nodes_for_key(self, key: str) -> list[str]:
    """
    Returns an ordered list of nodes responsible for the given key.
    The first node in the list is the primary, the rest are failover candidates
    in preferential order.
    """
    # No nodes registered -> nothing can own the key.
    if not self._nodes:
        return []

    # Locate the key's position on the ring via binary search.
    h = self._hash(key)
    start_idx = bisect.bisect_left(self._keys, h)

    # Collect unique physical nodes by iterating around the ring
    found_nodes = []
    for i in range(len(self._keys)):
        idx = (start_idx + i) % len(self._keys)
        node_hash = self._keys[idx]
        physical_node = self._nodes[node_hash]
        if physical_node not in found_nodes:
            found_nodes.append(physical_node)
        # Stop when we have found all unique physical nodes
        # NOTE(review): this set is rebuilt on every iteration; hoisting it
        # before the loop would avoid O(n^2) behavior on large rings.
        if len(found_nodes) == len(set(self._nodes.values())):
            break
    return found_nodes
gist_control
GistLoader
Source code in toolboxv2/utils/extras/gist_control.py
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
class GistLoader:
    """Loads and executes a Python module from the first file of a GitHub gist."""

    def __init__(self, gist_url):
        """
        :param gist_url: URL of the gist; the gist ID is taken from the last path segment.
        """
        self.gist_url = gist_url
        # Lazily fetched gist source text; populated on first load_module().
        self.module_code = None

    def load_module(self, module_name):
        """Load the module with the given name from the gist.

        SECURITY NOTE: this exec()s code fetched over the network; only use
        it with gists you trust.
        """
        if self.module_code is None:
            self.module_code = self._fetch_gist_content()

        # Create a fresh module object and execute the gist source inside it.
        module = importlib.util.module_from_spec(self.get_spec(module_name))
        exec(self.module_code, module.__dict__)
        return module

    def get_spec(self, module_name):
        """Return the module spec, with this object acting as the loader."""
        return ModuleSpec(module_name, self)

    def get_filename(self, module_name):
        """Return a pseudo-filename identifying the gist as the module source."""
        return f"<gist:{self.gist_url}>"

    def _fetch_gist_content(self):
        """Download the content of the gist's first file from the GitHub API.

        :raises Exception: if the API does not answer with HTTP 200.
        """
        gist_id = self.gist_url.split('/')[-1]
        api_url = f"https://api.github.com/gists/{gist_id}"

        # Fixed: added a timeout so a stalled API call cannot hang forever.
        response = requests.get(api_url, timeout=15)

        if response.status_code == 200:
            gist_data = response.json()
            first_file = next(iter(gist_data['files'].values()))
            return first_file['content']
        else:
            raise Exception(f"Failed to fetch gist: {response.status_code}")
get_spec(module_name)

Gibt die Modul-Specifikation zurück.

Source code in toolboxv2/utils/extras/gist_control.py
23
24
25
def get_spec(self, module_name):
    """Return the module spec for *module_name*, with this object as loader."""
    return ModuleSpec(module_name, self)
load_module(module_name)

Lädt das Modul mit dem gegebenen Namen.

Source code in toolboxv2/utils/extras/gist_control.py
13
14
15
16
17
18
19
20
21
def load_module(self, module_name):
    """Load the module with the given name from the gist.

    Fetches the gist source on first use, then executes it in a fresh
    module object. NOTE(review): exec() of remote code — trusted gists only.
    """
    if self.module_code is None:
        self.module_code = self._fetch_gist_content()

    # Create a new module object and run the gist source inside it.
    module = importlib.util.module_from_spec(self.get_spec(module_name))
    exec(self.module_code, module.__dict__)
    return module
helper_test_functions
generate_edge_value(param_type)

Generiert Edge-Case-Werte basierend auf dem Parametertyp.

Source code in toolboxv2/utils/extras/helper_test_functions.py
35
36
37
38
39
40
41
42
43
44
def generate_edge_value(param_type: Any) -> Any:
    """Return an edge-case value for the given parameter type.

    Numeric types yield a negative number, strings yield a long string;
    unsupported types yield None.
    """
    if param_type in (int, float):
        return -999  # example of a negative number
    if param_type == str:
        return "test " * 100  # long string
    # Extend with further branches for other data types as needed.
    return None
generate_normal_value(param_type)

Generiert normale Werte basierend auf dem Parametertyp.

Source code in toolboxv2/utils/extras/helper_test_functions.py
47
48
49
50
51
52
53
54
55
56
57
58
59
def generate_normal_value(param_type: Any) -> Any:
    """Return a typical ("normal") value for the given parameter type.

    :param param_type: Expected type of the parameter.
    :return: A representative value, or None for unsupported types.
    """
    if param_type in (int, float):
        return random.randint(0, 100)  # random ordinary number
    if param_type == str:
        return "test"  # fixed sample word
    # Fixed: import lazily so the common int/str paths do not pay for the
    # (heavy) toolboxv2 package import on every call.
    from toolboxv2 import RequestData
    if param_type == RequestData:
        return RequestData.moc()
    # Extend with further branches for other data types as needed.
    return None
keword_matcher
calculate_keyword_score(text, keywords)

Berechnet den Keyword-Score basierend auf der Häufigkeit der Keywords im Text. Case-insensitive und optimiert für Geschwindigkeit.

:param text: Eingabetext als String :param keywords: Set von Keywords :return: Gesamt-Score als Integer

Source code in toolboxv2/utils/extras/keword_matcher.py
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
def calculate_keyword_score(text: str, keywords: set[str]) -> int:
    """
    Compute the keyword score: the total number of keyword occurrences in
    the text. Case-insensitive and optimized for speed.

    :param text: Input text.
    :param keywords: Set of keywords to count.
    :return: Total occurrence count as an integer.
    """
    # Fixed: an empty keyword set previously built the pattern r'\b()\b',
    # which matches empty strings at every word boundary and yielded a
    # spurious nonzero score.
    if not keywords:
        return 0

    # One alternation pattern over all keywords, escaped and lowercased.
    keyword_pattern = re.compile(
        r'\b(' + '|'.join(re.escape(k.lower()) for k in keywords) + r')\b',
        flags=re.IGNORECASE
    )

    # Each match contributes exactly 1, so the score is simply the match
    # count (the per-keyword frequency dict of the original implementation
    # summed to the same value).
    return len(keyword_pattern.findall(text.lower()))
calculate_weighted_score(text, keyword_weights)

Berechnet gewichteten Score mit unterschiedlichen Gewichten pro Keyword

:param text: Eingabetext :param keyword_weights: Dictionary mit {Keyword: Gewicht} :return: Gewichteter Gesamt-Score

Source code in toolboxv2/utils/extras/keword_matcher.py
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
def calculate_weighted_score(text: str, keyword_weights: dict or list) -> float:
    """
    Berechnet gewichteten Score mit unterschiedlichen Gewichten pro Keyword

    :param text: Eingabetext
    :param keyword_weights: Dictionary mit {Keyword: Gewicht}
    :return: Gewichteter Gesamt-Score
    """
    total = 0.0
    text_lower = text.lower()

    if isinstance(keyword_weights, list):
        keyword_weights = {k:v for k, v in keyword_weights}

    for keyword, weight in keyword_weights.items():
        count = len(re.findall(r'\b' + re.escape(keyword.lower()) + r'\b', text_lower))
        total += count * weight

    return round(total, 2)
extract_keywords(text, max_len=-1, min_word_length=3, with_weights=False, remove_stopwords=True, stopwords=True)

Extrahiert Keywords mit optionaler Frequenzgewichtung

:param text: Eingabetext :param max_len: Maximale Anzahl Keywords (-1 = alle) :param min_word_length: Minimale Wortlänge :param with_weights: Gibt Wort+Frequenz zurück wenn True :param remove_stopwords: Filtert deutsche Stopwörter :param stopwords: Verwendet deutsche Standard-Stopwörter :return: Keywords oder (Keyword, Häufigkeit) Paare

Source code in toolboxv2/utils/extras/keword_matcher.py
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
def extract_keywords(
    text: str,
    max_len: int = -1,
    min_word_length: int = 3,
    with_weights: bool = False,
    remove_stopwords: bool = True,
    stopwords: bool = True
) -> list[str] | list[tuple[str, int]]:
    """
    Extrahiert Keywords mit optionaler Frequenzgewichtung

    :param text: Eingabetext
    :param max_len: Maximale Anzahl Keywords (-1 = alle)
    :param min_word_length: Minimale Wortlänge
    :param with_weights: Gibt Wort+Frequenz zurück wenn True
    :param remove_stopwords: Filtert deutsche Stopwörter
    :param german_stopwords: Verwendet deutsche Standard-Stopwörter
    :return: Keywords oder (Keyword, Häufigkeit) Paare
    """

    # Deutsche Basis-Stopwörter
    DEFAULT_STOPWORDS = STOPWORDS if stopwords else set()

    # Text vorverarbeiten
    words = re.findall(r'\b\w+\b', text.lower())

    # Worte filtern
    filtered_words = [
        word for word in words
        if len(word) > min_word_length
           and (not remove_stopwords or word not in DEFAULT_STOPWORDS)
    ]

    # Frequenzanalyse
    word_counts = defaultdict(int)
    for word in filtered_words:
        word_counts[word] += 1

    # Sortierung: Zuerst Häufigkeit, dann alphabetisch
    sorted_words = sorted(
        word_counts.items(),
        key=lambda x: (-x[1], x[0])
    )

    # Längenbegrenzung
    if max_len == -1:
        max_len = None
    result = sorted_words[:max_len]

    return result if with_weights else [word for word, _ in result]
reqbuilder
generate_requirements(folder, output_file)

Generates requirements.txt for the specified folder using pipreqs.

Source code in toolboxv2/utils/extras/reqbuilder.py
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
def generate_requirements(folder: str, output_file: str):
    """Generates requirements.txt for the specified folder using pipreqs."""
    # Currently a stub: prints its inputs and a not-implemented notice.
    print(folder, output_file, os.path.abspath(os.curdir))
    print("Not Implemented ")
    # The string expression below is dead code kept as a reference for the
    # intended pipreqs-based implementation; it is never executed.
    """try:
        from pipreqs.pipreqs import get_all_imports
    except ImportError:
        subprocess.run([sys.executable, "-m", "pip", "install", "pipreqs"], check=True)
    from pipreqs.pipreqs import get_all_imports
    imports = set(get_all_imports(os.path.abspath(folder)))
    imports.remove('toolboxv2') if 'toolboxv2' in imports else None
    with open(os.path.abspath(output_file), "w") as f:
        f.write("\n".join(imports))"""
run_pipeline(base_dir)

Runs the entire pipeline to generate requirements files.

Source code in toolboxv2/utils/extras/reqbuilder.py
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
def run_pipeline(base_dir: str):
    """Run the whole pipeline that generates the requirements files."""
    toolbox_path = os.path.join(base_dir, "toolboxv2")
    minimal_source = os.path.join(toolbox_path, "utils")
    minimal_target = os.path.join(base_dir, "requirements_mini.txt")
    tests_target = os.path.join(base_dir, "requirements_tests.txt")

    # Step 1: minimal requirements from the utils package.
    print("Step 1/2: ")
    generate_requirements(minimal_source, minimal_target)

    # Step 2: extended requirements from the test suite.
    print("Step 2/2: ")
    tests_source = os.path.join(toolbox_path, "tests")
    generate_requirements(tests_source, tests_target)

proxy

ProxyUtil
Source code in toolboxv2/utils/proxy/prox_util.py
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
class ProxyUtil:
    """Awaitable proxy that forwards selected method calls on a wrapped
    class instance to a remote demon via a SocketManager connection.

    Usage: ``proxy = await ProxyUtil(instance, ...)`` — construction is
    two-phase; the real setup happens in __ainit__ when awaited.
    """

    def __init__(self, *args, **kwargs):
        """
        Standard constructor used for arguments pass
        Do not override. Use __ainit__ instead
        """
        # Arguments are stored and replayed into __ainit__ on await.
        self.__storedargs = args, kwargs
        self.async_initialized = False

    async def __initobj(self):
        """Crutch used for __await__ after spawning"""
        # assert not self.async_initialized
        self.async_initialized = True
        # pass the parameters to __ainit__ that passed to __init__
        await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
        return self

    def __await__(self):
        return self.__initobj().__await__()

    async def __ainit__(self, class_instance: Any, host='0.0.0.0', port=6587, timeout=6,
                        app: (App or AppType) | None = None,
                        remote_functions=None, peer=False, name='ProxyApp-client', do_connect=True, unix_socket=False,
                        test_override=False):
        """Async initializer: stores configuration and (optionally) connects.

        :param class_instance: Local object whose attribute lookups the proxy wraps.
        :param timeout: Seconds to wait on the receiver queue for a reply.
        :param remote_functions: Names that are forwarded to the remote demon;
            defaults to a fixed list of app-control functions.
        :param do_connect: Connect immediately when True.
        """
        self.class_instance = class_instance
        self.client = None
        self.test_override = test_override
        self.port = port
        self.host = host
        self.timeout = timeout
        if app is None:
            app = get_app("ProxyUtil")
        self.app = app
        self._name = name
        self.unix_socket = unix_socket
        if remote_functions is None:
            remote_functions = ["run_any", "a_run_any", "remove_mod", "save_load", "exit_main", "show_console", "hide_console",
                                "rrun_flow",
                                "get_autocompletion_dict",
                                "exit_main", "watch_mod"]
        self.remote_functions = remote_functions

        # Imported here to avoid a module-level import cycle with mods.
        from toolboxv2.mods.SocketManager import SocketType
        self.connection_type = SocketType.client
        if peer:
            self.connection_type = SocketType.peer
        if do_connect:
            await self.connect()

    async def connect(self):
        """Create the client socket via SocketManager and store the handle.

        :raises Exception: when socket creation fails or reports a connection error.
        """
        client_result = await self.app.a_run_local(SOCKETMANAGER.CREATE_SOCKET,
                                           get_results=True,
                                           name=self._name,
                                           host=self.host,
                                           port=self.port,
                                           type_id=self.connection_type,
                                           max_connections=-1,
                                           return_full_object=True,
                                           test_override=self.test_override,
                                           unix_file=self.unix_socket)

        if client_result.is_error():
            raise Exception(f"Client {self._name} error: {client_result.print(False)}")
        if not client_result.is_data():
            raise Exception(f"Client {self._name} error: {client_result.print(False)}")
        # Keys of the dict wrapped in self.client (as produced by CREATE_SOCKET):
        # 'socket': socket,
        # 'receiver_socket': r_socket,
        # 'host': host,
        # 'port': port,
        # 'p2p-port': endpoint_port,
        # 'sender': send,
        # 'receiver_queue': receiver_queue,
        # 'connection_error': connection_error,
        # 'receiver_thread': s_thread,
        # 'keepalive_thread': keep_alive_thread,
        # 'running_dict': running_dict,
        # 'client_to_receiver_thread': to_receive,
        # 'client_receiver_threads': threeds,
        result = await client_result.aget()
        if result is None or result.get('connection_error') != 0:
            raise Exception(f"Client {self._name} error: {client_result.print(False)}")
        self.client = Result.ok(result)

    async def disconnect(self):
        """Close the client socket and drop the handle.

        NOTE(review): time.sleep() blocks the event loop inside an async
        method; asyncio.sleep() would be the non-blocking equivalent.
        """
        time.sleep(1)
        close = self.client.get("close")
        await close()
        self.client = None

    async def reconnect(self):
        """Disconnect (if connected) and establish a fresh connection."""
        if self.client is not None:
            await self.disconnect()
        await self.connect()

    async def verify(self, message=b"verify"):
        """Send a verification message over the socket after a short delay."""
        await asyncio.sleep(1)
        # self.client.get('sender')({'keepalive': 0})
        await self.client.get('sender')(message)

    def __getattr__(self, name):
        """Route attribute access: special names, remote calls, or passthrough.

        - "on_exit"/"rc"/"r" map to disconnect/reconnect/queue-read.
        - Names in remote_functions (with an active client) return an async
          wrapper that forwards the call to the demon and waits for a reply.
        - Everything else falls through to the wrapped class_instance.
        """
        # print(f"ProxyApp: {name}, {self.client is None}")
        if name == "on_exit":
            return self.disconnect
        if name == "rc":
            return self.reconnect

        if name == "r":
            # Drain one pending reply from the receiver queue.
            # NOTE(review): bare except silently maps any failure to "No data".
            try:
                return self.client.get('receiver_queue').get(timeout=self.timeout)
            except:
                return "No data"

        app_attr = getattr(self.class_instance, name)

        async def method(*args, **kwargs):
            # if name == 'run_any':
            #     print("method", name, kwargs.get('get_results', False), args[0])
            if self.client is None:
                await self.reconnect()
            # spec='app' forces a purely local call on the wrapped instance.
            if kwargs.get('spec', '-') == 'app':
                if asyncio.iscoroutinefunction(app_attr):
                    return await app_attr(*args, **kwargs)
                return app_attr(*args, **kwargs)
            try:
                if name in self.remote_functions:
                    # run_any/a_run_any without get_results stays local.
                    if (name == 'run_any' or name == 'a_run_any') and not kwargs.get('get_results', False):
                        if asyncio.iscoroutinefunction(app_attr):
                            return await app_attr(*args, **kwargs)
                        return app_attr(*args, **kwargs)
                    if (name == 'run_any' or name == 'a_run_any') and kwargs.get('get_results', False):
                        # Serialize an Enum first argument into plain values
                        # for transport.
                        # NOTE(review): this produces a nested tuple
                        # ((NAME, value), rest) — presumably what the demon
                        # side expects; confirm against the server decoder.
                        if isinstance(args[0], Enum):
                            args = (args[0].__class__.NAME.value, args[0].value), args[1:]
                    self.app.sprint(f"Calling method {name}, {args=}, {kwargs}=")
                    await self.client.get('sender')({'name': name, 'args': args, 'kwargs': kwargs})
                    # NOTE(review): `while Spinner(...)` relies on the Spinner
                    # object's truthiness; the loop body always returns on the
                    # first pass.
                    while Spinner("Waiting for result"):
                        try:
                            data = self.client.get('receiver_queue').get(timeout=self.timeout)
                            if isinstance(data, dict) and 'identifier' in data:
                                del data["identifier"]
                            if 'error' in data and 'origin' in data and 'result' in data and 'info' in data:
                                data = ApiResult(**data).as_result()
                            return data
                        except:
                            print("No data look later with class_instance.r")
                            return Result.default_internal_error("No data received from Demon."
                                                                 " uns class_instance.r to get data later")
            except:
                # NOTE(review): bare except; on any failure the client handle
                # is dropped if the socket is gone, then we fall back to the
                # local attribute.
                if self.client.get('socket') is None:
                    self.client = None
            return app_attr(*args, **kwargs)

        if callable(app_attr) and name in self.remote_functions and self.client is not None:
            return method
        return app_attr
__init__(*args, **kwargs)

Standard constructor used for argument passing. Do not override; use `__ainit__` instead.

Source code in toolboxv2/utils/proxy/prox_util.py
20
21
22
23
24
25
26
def __init__(self, *args, **kwargs):
    """
    Standard constructor used for arguments pass
    Do not override. Use __ainit__ instead
    """
    self.__storedargs = args, kwargs
    self.async_initialized = False
__initobj() async

Crutch used for `__await__` after spawning.

Source code in toolboxv2/utils/proxy/prox_util.py
28
29
30
31
32
33
34
async def __initobj(self):
    """Crutch used for __await__ after spawning"""
    # assert not self.async_initialized
    self.async_initialized = True
    # pass the parameters to __ainit__ that passed to __init__
    await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
    return self
prox_util
ProxyUtil
Source code in toolboxv2/utils/proxy/prox_util.py
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
class ProxyUtil:
    """Async-initialised proxy that forwards selected method calls on a wrapped
    ``class_instance`` to a remote daemon over a SocketManager connection.

    Instances must be awaited after construction
    (``proxy = await ProxyUtil(...)``); the real set-up happens in ``__ainit__``.
    Attribute access is intercepted by ``__getattr__``: allow-listed callables
    are sent over the socket, everything else falls through to the wrapped
    instance.
    """

    def __init__(self, *args, **kwargs):
        """
        Standard constructor used for argument passing.
        Do not override; use __ainit__ instead.
        """
        self.__storedargs = args, kwargs
        self.async_initialized = False

    async def __initobj(self):
        """Crutch used for __await__ after spawning."""
        # assert not self.async_initialized
        self.async_initialized = True
        # pass the parameters to __ainit__ that were passed to __init__
        await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
        return self

    def __await__(self):
        # Makes the object awaitable: ``await ProxyUtil(...)`` runs __initobj.
        return self.__initobj().__await__()

    async def __ainit__(self, class_instance: Any, host='0.0.0.0', port=6587, timeout=6,
                        app: (App or AppType) | None = None,
                        remote_functions=None, peer=False, name='ProxyApp-client', do_connect=True, unix_socket=False,
                        test_override=False):
        """Asynchronous initialiser (called via ``await`` on the instance).

        Args:
            class_instance: Local object whose attributes are proxied.
            host: Remote daemon host.
            port: Remote daemon port.
            timeout: Seconds to wait on the receiver queue per remote call.
            app: Toolbox App; fetched via ``get_app("ProxyUtil")`` when None.
            remote_functions: Attribute names forwarded to the daemon
                (defaults to the fixed allow-list below).
            peer: Use a peer socket type instead of a plain client socket.
            name: Client name registered with the SocketManager.
            do_connect: Connect immediately when True.
            unix_socket: Use a unix-domain socket file instead of TCP.
            test_override: Forwarded to the SocketManager (test setups).
        """
        self.class_instance = class_instance
        self.client = None
        self.test_override = test_override
        self.port = port
        self.host = host
        self.timeout = timeout
        if app is None:
            app = get_app("ProxyUtil")
        self.app = app
        self._name = name
        self.unix_socket = unix_socket
        if remote_functions is None:
            # NOTE(review): "exit_main" appears twice in this default list.
            remote_functions = ["run_any", "a_run_any", "remove_mod", "save_load", "exit_main", "show_console", "hide_console",
                                "rrun_flow",
                                "get_autocompletion_dict",
                                "exit_main", "watch_mod"]
        self.remote_functions = remote_functions

        from toolboxv2.mods.SocketManager import SocketType
        self.connection_type = SocketType.client
        if peer:
            self.connection_type = SocketType.peer
        if do_connect:
            await self.connect()

    async def connect(self):
        """Create the SocketManager client socket and store the connection dict.

        Raises:
            Exception: If socket creation fails, returns no data, or reports a
                non-zero ``connection_error``.
        """
        client_result = await self.app.a_run_local(SOCKETMANAGER.CREATE_SOCKET,
                                           get_results=True,
                                           name=self._name,
                                           host=self.host,
                                           port=self.port,
                                           type_id=self.connection_type,
                                           max_connections=-1,
                                           return_full_object=True,
                                           test_override=self.test_override,
                                           unix_file=self.unix_socket)

        if client_result.is_error():
            raise Exception(f"Client {self._name} error: {client_result.print(False)}")
        if not client_result.is_data():
            raise Exception(f"Client {self._name} error: {client_result.print(False)}")
        # Keys expected in the returned connection dict:
        # 'socket': socket,
        # 'receiver_socket': r_socket,
        # 'host': host,
        # 'port': port,
        # 'p2p-port': endpoint_port,
        # 'sender': send,
        # 'receiver_queue': receiver_queue,
        # 'connection_error': connection_error,
        # 'receiver_thread': s_thread,
        # 'keepalive_thread': keep_alive_thread,
        # 'running_dict': running_dict,
        # 'client_to_receiver_thread': to_receive,
        # 'client_receiver_threads': threeds,
        result = await client_result.aget()
        if result is None or result.get('connection_error') != 0:
            raise Exception(f"Client {self._name} error: {client_result.print(False)}")
        self.client = Result.ok(result)

    async def disconnect(self):
        """Close the client connection and drop the stored handle."""
        # NOTE(review): time.sleep blocks the event loop inside a coroutine;
        # asyncio.sleep would be the non-blocking choice — confirm before changing.
        time.sleep(1)
        close = self.client.get("close")
        await close()
        self.client = None

    async def reconnect(self):
        """Disconnect (if connected) and establish a fresh connection."""
        if self.client is not None:
            await self.disconnect()
        await self.connect()

    async def verify(self, message=b"verify"):
        """Send a verification message over the established connection."""
        await asyncio.sleep(1)
        # self.client.get('sender')({'keepalive': 0})
        await self.client.get('sender')(message)

    def __getattr__(self, name):
        """Resolve attribute access: control names ('on_exit', 'rc', 'r') map to
        proxy operations; allow-listed callables are forwarded to the daemon;
        anything else falls through to the wrapped class_instance."""

        # print(f"ProxyApp: {name}, {self.client is None}")
        if name == "on_exit":
            return self.disconnect
        if name == "rc":
            return self.reconnect

        if name == "r":
            # Pull one pending message off the receiver queue (waits up to timeout).
            try:
                return self.client.get('receiver_queue').get(timeout=self.timeout)
            except:
                return "No data"

        app_attr = getattr(self.class_instance, name)

        async def method(*args, **kwargs):
            # if name == 'run_any':
            #     print("method", name, kwargs.get('get_results', False), args[0])
            if self.client is None:
                await self.reconnect()
            if kwargs.get('spec', '-') == 'app':
                # spec='app' forces a local call on the wrapped instance.
                if asyncio.iscoroutinefunction(app_attr):
                    return await app_attr(*args, **kwargs)
                return app_attr(*args, **kwargs)
            try:
                if name in self.remote_functions:
                    # run_any/a_run_any without get_results is served locally.
                    if (name == 'run_any' or name == 'a_run_any') and not kwargs.get('get_results', False):
                        if asyncio.iscoroutinefunction(app_attr):
                            return await app_attr(*args, **kwargs)
                        return app_attr(*args, **kwargs)
                    if (name == 'run_any' or name == 'a_run_any') and kwargs.get('get_results', False):
                        if isinstance(args[0], Enum):
                            # NOTE(review): this produces a nested tuple
                            # ((module, function), remaining_args) — confirm the
                            # daemon expects that shape rather than a flat tuple.
                            args = (args[0].__class__.NAME.value, args[0].value), args[1:]
                    self.app.sprint(f"Calling method {name}, {args=}, {kwargs}=")
                    await self.client.get('sender')({'name': name, 'args': args, 'kwargs': kwargs})
                    # NOTE(review): Spinner is used as a loop condition here;
                    # this relies on its truthiness, and the loop returns on the
                    # first queue message either way.
                    while Spinner("Waiting for result"):
                        try:
                            data = self.client.get('receiver_queue').get(timeout=self.timeout)
                            if isinstance(data, dict) and 'identifier' in data:
                                del data["identifier"]
                            if 'error' in data and 'origin' in data and 'result' in data and 'info' in data:
                                data = ApiResult(**data).as_result()
                            return data
                        except:
                            print("No data look later with class_instance.r")
                            return Result.default_internal_error("No data received from Demon."
                                                                 " uns class_instance.r to get data later")
            except:
                # On any send failure: drop the handle if the socket is gone,
                # then fall back to a local call below.
                if self.client.get('socket') is None:
                    self.client = None
            return app_attr(*args, **kwargs)

        if callable(app_attr) and name in self.remote_functions and self.client is not None:
            return method
        return app_attr
__init__(*args, **kwargs)

Standard constructor used for argument passing. Do not override it; use `__ainit__` instead.

Source code in toolboxv2/utils/proxy/prox_util.py
20
21
22
23
24
25
26
def __init__(self, *args, **kwargs):
    """
    Standard constructor used for argument passing.
    Do not override; use __ainit__ instead — the stored args/kwargs are
    forwarded to __ainit__ once the instance is awaited.
    """
    self.__storedargs = args, kwargs
    self.async_initialized = False
__initobj() async

Crutch used for `__await__` after spawning.

Source code in toolboxv2/utils/proxy/prox_util.py
28
29
30
31
32
33
34
async def __initobj(self):
    """Crutch used for __await__ after spawning."""
    # assert not self.async_initialized
    self.async_initialized = True
    # pass the parameters to __ainit__ that were passed to __init__
    await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
    return self

security

Code
Source code in toolboxv2/utils/security/cryp.py
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
class Code:
    """Collection of crypto helpers: symmetric (Fernet), asymmetric (RSA-OAEP),
    signatures (RSA-PSS / ECDSA), hashing, and key persistence."""

    @staticmethod
    def DK():
        # Returns the module-level DEVICE_KEY helper itself (uncalled).
        return DEVICE_KEY

    def decode_code(self, encrypted_data, key=None):
        """Symmetrically decrypt *encrypted_data*; defaults to the device key."""

        if not isinstance(encrypted_data, str):
            encrypted_data = str(encrypted_data)

        if key is None:
            key = DEVICE_KEY()

        return self.decrypt_symmetric(encrypted_data, key)

    def encode_code(self, data, key=None):
        """Symmetrically encrypt *data*; defaults to the device key."""

        if not isinstance(data, str):
            data = str(data)

        if key is None:
            key = DEVICE_KEY()

        return self.encrypt_symmetric(data, key)

    @staticmethod
    def generate_seed() -> int:
        """
        Generate a random number to use as a seed.

        Returns:
            int: A random number in [2**32 - 1, 2**64 - 1].
        """
        # NOTE(review): uses random, not secrets — not for security-critical seeds.
        return random.randint(2 ** 32 - 1, 2 ** 64 - 1)

    @staticmethod
    def one_way_hash(text: str, salt: str = '', pepper: str = '') -> str:
        """
        Compute a SHA-256 hash of a given text with salt and pepper.

        Args:
            text (str): The text to hash.
            salt (str): The salt value (prepended to the text).
            pepper (str): The pepper value (appended to the text).

        Returns:
            str: The resulting hex-encoded hash value.
        """
        return hashlib.sha256((salt + text + pepper).encode()).hexdigest()

    @staticmethod
    def generate_symmetric_key(as_str=True) -> str or bytes:
        """
        Generate a key for symmetric (Fernet) encryption.

        Returns:
            str: The generated key (bytes when as_str is False).
        """
        key = Fernet.generate_key()
        if as_str:
            key = key.decode()
        return key

    @staticmethod
    def encrypt_symmetric(text: str or bytes, key: str) -> str:
        """
        Encrypt a text with a given symmetric key.

        Args:
            text (str): The text to encrypt.
            key (str): The symmetric (Fernet) key.

        Returns:
            str: The encrypted text, or "Error encrypt" on failure.
        """
        if isinstance(text, str):
            text = text.encode()

        try:
            fernet = Fernet(key.encode())
            return fernet.encrypt(text).decode()
        except Exception as e:
            get_logger().error(f"Error encrypt_symmetric #{str(e)}#")
            return "Error encrypt"

    @staticmethod
    def decrypt_symmetric(encrypted_text: str, key: str, to_str=True, mute=False) -> str or bytes:
        """
        Decrypt a text with a given symmetric key.

        Args:
            encrypted_text (str): The text to decrypt.
            key (str): The symmetric (Fernet) key.
            to_str (bool): Default True returns str; if False returns bytes.
            mute (bool): Unused in the active code path (see commented block below).
        Returns:
            str: The decrypted text.
        """

        if isinstance(key, str):
            key = key.encode()

        #try:
        fernet = Fernet(key)
        text_b = fernet.decrypt(encrypted_text)
        if not to_str:
            return text_b
        return text_b.decode()
        # except Exception as e:
        #     get_logger().error(f"Error decrypt_symmetric {e}")
        #     if not mute:
        #         raise e
        #     if not to_str:
        #         return f"Error decoding".encode()
        #     return f"Error decoding"

    @staticmethod
    def generate_asymmetric_keys() -> (str, str):
        """
        Generate a public/private key pair for asymmetric (RSA) encryption.

        Returns:
            (str, str): A tuple of public and private key, both PEM-encoded.
        """
        private_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048 * 3,
        )
        public_key = private_key.public_key()

        # Serialize the keys
        pem_private_key = private_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption()
        ).decode()

        pem_public_key = public_key.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        ).decode()

        return pem_public_key, pem_private_key

    @staticmethod
    def save_keys_to_files(public_key: str, private_key: str, directory: str = "keys") -> None:
        """
        Save the generated keys to separate files.
        The private key is encrypted with the device key before writing.

        Args:
            public_key (str): The public key in PEM format
            private_key (str): The private key in PEM format
            directory (str): The directory in which to store the keys
        """
        # Create the directory if it does not exist
        os.makedirs(directory, exist_ok=True)

        # Fetch the device key
        device_key = DEVICE_KEY()

        # Encrypt the private key with the device key
        encrypted_private_key = Code.encrypt_symmetric(private_key, device_key)

        # Write the public key
        public_key_path = os.path.join(directory, "public_key.pem")
        with open(public_key_path, "w") as f:
            f.write(public_key)

        # Write the encrypted private key
        private_key_path = os.path.join(directory, "private_key.pem")
        with open(private_key_path, "w") as f:
            f.write(encrypted_private_key)

        print("Saved keys in ", public_key_path)

    @staticmethod
    def load_keys_from_files(directory: str = "keys") -> (str, str):
        """
        Load the keys from their files.
        The private key is decrypted with the device key.

        Args:
            directory (str): The directory from which to load the keys

        Returns:
            (str, str): A tuple of public and private key, or ("", "") when
            either key file does not exist.
        """
        # Paths to the key files
        public_key_path = os.path.join(directory, "public_key.pem")
        private_key_path = os.path.join(directory, "private_key.pem")

        # Check whether both files exist
        if not os.path.exists(public_key_path) or not os.path.exists(private_key_path):
            return "", ""

        # Fetch the device key
        device_key = DEVICE_KEY()

        # Load the public key
        with open(public_key_path) as f:
            public_key = f.read()

        # Load and decrypt the private key
        with open(private_key_path) as f:
            encrypted_private_key = f.read()
            private_key = Code.decrypt_symmetric(encrypted_private_key, device_key)

        return public_key, private_key

    @staticmethod
    def encrypt_asymmetric(text: str, public_key_str: str) -> str:
        """
        Encrypt a text with a given public key (RSA-OAEP/SHA-512).

        Args:
            text (str): The text to encrypt.
            public_key_str (str): The public key as a PEM-format string.

        Returns:
            str: The encrypted text as hex, or "Invalid" on failure.
        """
        # try:
        #    public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
        #  except Exception as e:
        #     get_logger().error(f"Error encrypt_asymmetric {e}")
        try:
            public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
            encrypted = public_key.encrypt(
                text.encode(),
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA512()),
                    algorithm=hashes.SHA512(),
                    label=None
                )
            )
            return encrypted.hex()
        except Exception as e:
            get_logger().error(f"Error encrypt_asymmetric {e}")
            return "Invalid"

    @staticmethod
    def decrypt_asymmetric(encrypted_text_hex: str, private_key_str: str) -> str:
        """
        Decrypt a text with a given private key (RSA-OAEP/SHA-512).

        Args:
            encrypted_text_hex (str): The encrypted text as a hex string.
            private_key_str (str): The private key as a PEM-format string.

        Returns:
            str: The decrypted text, or "Invalid" on failure.
        """
        try:
            private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
            decrypted = private_key.decrypt(
                bytes.fromhex(encrypted_text_hex),
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA512()),
                    algorithm=hashes.SHA512(),
                    label=None
                )
            )
            return decrypted.decode()

        except Exception as e:
            get_logger().error(f"Error decrypt_asymmetric {e}")
        return "Invalid"

    @staticmethod
    def verify_signature(signature: str or bytes, message: str or bytes, public_key_str: str,
                         salt_length=padding.PSS.MAX_LENGTH) -> bool:
        """Verify an RSA-PSS/SHA-512 signature; returns False on any failure."""
        if isinstance(signature, str):
            signature = signature.encode()
        if isinstance(message, str):
            message = message.encode()
        try:
            public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
            public_key.verify(
                signature=signature,
                data=message,
                padding=padding.PSS(
                    mgf=padding.MGF1(hashes.SHA512()),
                    salt_length=salt_length
                ),
                algorithm=hashes.SHA512()
            )
            return True
        except:
            pass
        return False

    @staticmethod
    def verify_signature_web_algo(signature: str or bytes, message: str or bytes, public_key_str: str,
                                  algo: int = -512) -> bool:
        """Verify an ECDSA signature (SHA-512 when algo == -512, else SHA-256);
        returns False on any failure."""
        signature_algorithm = ECDSA(hashes.SHA512())
        if algo != -512:
            signature_algorithm = ECDSA(hashes.SHA256())

        if isinstance(signature, str):
            signature = signature.encode()
        if isinstance(message, str):
            message = message.encode()
        try:
            public_key = serialization.load_pem_public_key(public_key_str.encode())
            public_key.verify(
                signature=signature,
                data=message,
                # padding=padding.PSS(
                #    mgf=padding.MGF1(hashes.SHA512()),
                #    salt_length=padding.PSS.MAX_LENGTH
                # ),
                signature_algorithm=signature_algorithm
            )
            return True
        except:
            pass
        return False

    @staticmethod
    def create_signature(message: str, private_key_str: str, salt_length=padding.PSS.MAX_LENGTH,
                         row=False) -> str or bytes:
        """Sign *message* with RSA-PSS/SHA-512. Returns raw bytes when row=True,
        otherwise a base64 string; "Invalid Key" on failure."""
        try:
            private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
            signature = private_key.sign(
                message.encode(),
                padding.PSS(
                    mgf=padding.MGF1(hashes.SHA512()),
                    salt_length=salt_length
                ),
                hashes.SHA512()
            )
            if row:
                return signature
            return base64.b64encode(signature).decode()
        except Exception as e:
            get_logger().error(f"Error create_signature {e}")
            print(e)
        return "Invalid Key"

    @staticmethod
    def pem_to_public_key(pem_key: str):
        """
        Convert a PEM-encoded public key into a PublicKey object.

        Args:
            pem_key (str): The PEM-encoded public key.

        Returns:
            PublicKey: The PublicKey object.
        """
        public_key = serialization.load_pem_public_key(pem_key.encode())
        return public_key

    @staticmethod
    def public_key_to_pem(public_key: RSAPublicKey):
        """
        Convert a PublicKey object into a PEM-encoded string.

        Args:
            public_key (PublicKey): The PublicKey object.

        Returns:
            str: The PEM-encoded public key.
        """
        pem = public_key.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        )
        return pem.decode()
decrypt_asymmetric(encrypted_text_hex, private_key_str) staticmethod

Entschlüsselt einen Text mit einem gegebenen privaten Schlüssel.

Parameters:

Name Type Description Default
encrypted_text_hex str

Der verschlüsselte Text als Hex-String.

required
private_key_str str

Der private Schlüssel als String.

required

Returns:

Name Type Description
str str

Der entschlüsselte Text.

Source code in toolboxv2/utils/security/cryp.py
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
@staticmethod
def decrypt_asymmetric(encrypted_text_hex: str, private_key_str: str) -> str:
    """
    Decrypt a text with a given private key (RSA-OAEP/SHA-512).

    Args:
        encrypted_text_hex (str): The encrypted text as a hex string.
        private_key_str (str): The private key as a PEM-format string.

    Returns:
        str: The decrypted text, or "Invalid" on failure.
    """
    try:
        private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
        decrypted = private_key.decrypt(
            bytes.fromhex(encrypted_text_hex),
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.SHA512()),
                algorithm=hashes.SHA512(),
                label=None
            )
        )
        return decrypted.decode()

    except Exception as e:
        get_logger().error(f"Error decrypt_asymmetric {e}")
    return "Invalid"
decrypt_symmetric(encrypted_text, key, to_str=True, mute=False) staticmethod

Entschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

Parameters:

Name Type Description Default
encrypted_text str

Der zu entschlüsselnde Text.

required
key str

Der symmetrische Schlüssel.

required
to_str bool

default true returns str if false returns bytes

True

Returns: str: Der entschlüsselte Text.

Source code in toolboxv2/utils/security/cryp.py
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
@staticmethod
def decrypt_symmetric(encrypted_text: str, key: str, to_str=True, mute=False) -> str or bytes:
    """
    Decrypt a text with a given symmetric key.

    Args:
        encrypted_text (str): The text to decrypt.
        key (str): The symmetric (Fernet) key.
        to_str (bool): Default True returns str; if False returns bytes.
        mute (bool): Unused in the active code path.
    Returns:
        str: The decrypted text.
    """

    if isinstance(key, str):
        key = key.encode()

    #try:
    fernet = Fernet(key)
    text_b = fernet.decrypt(encrypted_text)
    if not to_str:
        return text_b
    return text_b.decode()
encrypt_asymmetric(text, public_key_str) staticmethod

Verschlüsselt einen Text mit einem gegebenen öffentlichen Schlüssel.

Parameters:

Name Type Description Default
text str

Der zu verschlüsselnde Text.

required
public_key_str str

Der öffentliche Schlüssel als String oder im pem format.

required

Returns:

Name Type Description
str str

Der verschlüsselte Text.

Source code in toolboxv2/utils/security/cryp.py
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
@staticmethod
def encrypt_asymmetric(text: str, public_key_str: str) -> str:
    """
    Encrypt a text with a given public key (RSA-OAEP/SHA-512).

    Args:
        text (str): The text to encrypt.
        public_key_str (str): The public key as a PEM-format string.

    Returns:
        str: The encrypted text as hex, or "Invalid" on failure.
    """
    # try:
    #    public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
    #  except Exception as e:
    #     get_logger().error(f"Error encrypt_asymmetric {e}")
    try:
        public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
        encrypted = public_key.encrypt(
            text.encode(),
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.SHA512()),
                algorithm=hashes.SHA512(),
                label=None
            )
        )
        return encrypted.hex()
    except Exception as e:
        get_logger().error(f"Error encrypt_asymmetric {e}")
        return "Invalid"
encrypt_symmetric(text, key) staticmethod

Verschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

Parameters:

Name Type Description Default
text str

Der zu verschlüsselnde Text.

required
key str

Der symmetrische Schlüssel.

required

Returns:

Name Type Description
str str

Der verschlüsselte Text.

Source code in toolboxv2/utils/security/cryp.py
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
@staticmethod
def encrypt_symmetric(text: str or bytes, key: str) -> str:
    """
    Encrypt a text with a given symmetric key.

    Args:
        text (str): The text to encrypt.
        key (str): The symmetric (Fernet) key.

    Returns:
        str: The encrypted text, or "Error encrypt" on failure.
    """
    if isinstance(text, str):
        text = text.encode()

    try:
        fernet = Fernet(key.encode())
        return fernet.encrypt(text).decode()
    except Exception as e:
        get_logger().error(f"Error encrypt_symmetric #{str(e)}#")
        return "Error encrypt"
generate_asymmetric_keys() staticmethod

Generiert ein Paar von öffentlichen und privaten Schlüsseln für die asymmetrische Verschlüsselung.

Parameters:

Name Type Description Default
seed int

Ein optionaler Seed-Wert. Standardmäßig None.

required

Returns:

Type Description
(str, str)

Ein Tupel aus öffentlichem und privatem Schlüssel.

Source code in toolboxv2/utils/security/cryp.py
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
@staticmethod
def generate_asymmetric_keys() -> (str, str):
    """
    Generate a public/private key pair for asymmetric (RSA) encryption.

    Returns:
        (str, str): A tuple of public and private key, both PEM-encoded.
    """
    private_key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048 * 3,
    )
    public_key = private_key.public_key()

    # Serialize the keys
    pem_private_key = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption()
    ).decode()

    pem_public_key = public_key.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo
    ).decode()

    return pem_public_key, pem_private_key
generate_seed() staticmethod

Erzeugt eine zufällige Zahl als Seed.

Returns:

Name Type Description
int int

Eine zufällige Zahl.

Source code in toolboxv2/utils/security/cryp.py
101
102
103
104
105
106
107
108
109
@staticmethod
def generate_seed() -> int:
    """
    Erzeugt eine zufällige Zahl als Seed.

    Returns:
        int: Eine zufällige Zahl.
    """
    return random.randint(2 ** 32 - 1, 2 ** 64 - 1)
generate_symmetric_key(as_str=True) staticmethod

Generiert einen Schlüssel für die symmetrische Verschlüsselung.

Returns:

Name Type Description
str str or bytes

Der generierte Schlüssel.

Source code in toolboxv2/utils/security/cryp.py
127
128
129
130
131
132
133
134
135
136
137
138
@staticmethod
def generate_symmetric_key(as_str=True) -> str or bytes:
    """
    Generate a key for symmetric (Fernet) encryption.

    Returns:
        str: The generated key (bytes when as_str is False).
    """
    key = Fernet.generate_key()
    if as_str:
        key = key.decode()
    return key
load_keys_from_files(directory='keys') staticmethod

Lädt die Schlüssel aus den Dateien. Der private Schlüssel wird mit dem Device Key entschlüsselt.

Parameters:

Name Type Description Default
directory str

Das Verzeichnis, aus dem die Schlüssel geladen werden sollen

'keys'

Returns:

Type Description
(str, str)

Ein Tupel aus öffentlichem und privatem Schlüssel

Raises:

Type Description
FileNotFoundError

Wenn die Schlüsseldateien nicht gefunden werden können

Source code in toolboxv2/utils/security/cryp.py
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
@staticmethod
def load_keys_from_files(directory: str = "keys") -> (str, str):
    """
    Load the keys from their files.
    The private key is decrypted with the device key.

    Args:
        directory (str): The directory from which to load the keys

    Returns:
        (str, str): A tuple of public and private key, or ("", "") when
        either key file does not exist.
    """
    # Paths to the key files
    public_key_path = os.path.join(directory, "public_key.pem")
    private_key_path = os.path.join(directory, "private_key.pem")

    # Check whether both files exist
    if not os.path.exists(public_key_path) or not os.path.exists(private_key_path):
        return "", ""

    # Fetch the device key
    device_key = DEVICE_KEY()

    # Load the public key
    with open(public_key_path) as f:
        public_key = f.read()

    # Load and decrypt the private key
    with open(private_key_path) as f:
        encrypted_private_key = f.read()
        private_key = Code.decrypt_symmetric(encrypted_private_key, device_key)

    return public_key, private_key
one_way_hash(text, salt='', pepper='') staticmethod

Erzeugt einen Hash eines gegebenen Textes mit Salt, Pepper und optional einem Seed.

Parameters:

Name Type Description Default
text str

Der zu hashende Text.

required
salt str

Der Salt-Wert.

''
pepper str

Der Pepper-Wert.

''
seed int

Ein optionaler Seed-Wert. Standardmäßig None.

required

Returns:

Name Type Description
str str

Der resultierende Hash-Wert.

Source code in toolboxv2/utils/security/cryp.py
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
@staticmethod
def one_way_hash(text: str, salt: str = '', pepper: str = '') -> str:
    """
    Erzeugt einen Hash eines gegebenen Textes mit Salt, Pepper und optional einem Seed.

    Args:
        text (str): Der zu hashende Text.
        salt (str): Der Salt-Wert.
        pepper (str): Der Pepper-Wert.
        seed (int, optional): Ein optionaler Seed-Wert. Standardmäßig None.

    Returns:
        str: Der resultierende Hash-Wert.
    """
    return hashlib.sha256((salt + text + pepper).encode()).hexdigest()
pem_to_public_key(pem_key) staticmethod

Konvertiert einen PEM-kodierten öffentlichen Schlüssel in ein PublicKey-Objekt.

Parameters:

Name Type Description Default
pem_key str

Der PEM-kodierte öffentliche Schlüssel.

required

Returns:

Name Type Description
PublicKey

Das PublicKey-Objekt.

Source code in toolboxv2/utils/security/cryp.py
422
423
424
425
426
427
428
429
430
431
432
433
434
@staticmethod
def pem_to_public_key(pem_key: str):
    """
    Parse a PEM-encoded public key string into a key object.

    Args:
        pem_key (str): PEM-encoded public key.

    Returns:
        PublicKey: The deserialized public key object.
    """
    return serialization.load_pem_public_key(pem_key.encode())
public_key_to_pem(public_key) staticmethod

Konvertiert ein PublicKey-Objekt in einen PEM-kodierten String.

Parameters:

Name Type Description Default
public_key PublicKey

Das PublicKey-Objekt.

required

Returns:

Name Type Description
str

Der PEM-kodierte öffentliche Schlüssel.

Source code in toolboxv2/utils/security/cryp.py
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
@staticmethod
def public_key_to_pem(public_key: RSAPublicKey):
    """
    Serialize a public key object to a PEM string.

    Args:
        public_key (PublicKey): Key object to serialize.

    Returns:
        str: PEM-encoded public key.
    """
    pem_bytes = public_key.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo
    )
    return pem_bytes.decode()
save_keys_to_files(public_key, private_key, directory='keys') staticmethod

Speichert die generierten Schlüssel in separate Dateien. Der private Schlüssel wird mit dem Device Key verschlüsselt.

Parameters:

Name Type Description Default
public_key str

Der öffentliche Schlüssel im PEM-Format

required
private_key str

Der private Schlüssel im PEM-Format

required
directory str

Das Verzeichnis, in dem die Schlüssel gespeichert werden sollen

'keys'
Source code in toolboxv2/utils/security/cryp.py
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
@staticmethod
def save_keys_to_files(public_key: str, private_key: str, directory: str = "keys") -> None:
    """
    Write the key pair to ``public_key.pem`` / ``private_key.pem`` in *directory*.

    The private key is encrypted with the device key before it is written;
    the public key is stored as plain PEM text.

    Args:
        public_key (str): Public key in PEM format.
        private_key (str): Private key in PEM format.
        directory (str): Target directory; created if it does not exist.
    """
    os.makedirs(directory, exist_ok=True)

    # Encrypt the private key with the device-specific key before writing
    device_key = DEVICE_KEY()
    encrypted_private_key = Code.encrypt_symmetric(private_key, device_key)

    public_key_path = os.path.join(directory, "public_key.pem")
    private_key_path = os.path.join(directory, "private_key.pem")

    with open(public_key_path, "w") as f:
        f.write(public_key)

    with open(private_key_path, "w") as f:
        f.write(encrypted_private_key)

    print("Saved keys in ", public_key_path)
cryp
Code
Source code in toolboxv2/utils/security/cryp.py
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
class Code:
    """Static helpers for symmetric (Fernet) and asymmetric (RSA) cryptography."""

    @staticmethod
    def DK():
        # Returns the device-key factory (a callable), not the key value itself.
        return DEVICE_KEY

    def decode_code(self, encrypted_data, key=None):
        """Decrypt *encrypted_data*; uses the device key when *key* is None."""
        if not isinstance(encrypted_data, str):
            encrypted_data = str(encrypted_data)

        if key is None:
            key = DEVICE_KEY()

        return self.decrypt_symmetric(encrypted_data, key)

    def encode_code(self, data, key=None):
        """Encrypt *data*; uses the device key when *key* is None."""
        if not isinstance(data, str):
            data = str(data)

        if key is None:
            key = DEVICE_KEY()

        return self.encrypt_symmetric(data, key)

    @staticmethod
    def generate_seed() -> int:
        """
        Generate a random number usable as a seed.

        Returns:
            int: A random integer in [2**32 - 1, 2**64 - 1].
        """
        return random.randint(2 ** 32 - 1, 2 ** 64 - 1)

    @staticmethod
    def one_way_hash(text: str, salt: str = '', pepper: str = '') -> str:
        """
        Hash a given text with salt and pepper (SHA-256).

        Args:
            text (str): The text to hash.
            salt (str): The salt value (prepended to the text).
            pepper (str): The pepper value (appended to the text).

        Returns:
            str: The resulting hex digest.
        """
        return hashlib.sha256((salt + text + pepper).encode()).hexdigest()

    @staticmethod
    def generate_symmetric_key(as_str=True) -> str or bytes:
        """
        Generate a key for symmetric (Fernet) encryption.

        Args:
            as_str (bool): Return the key decoded to str (default) instead of bytes.

        Returns:
            str: The generated key.
        """
        key = Fernet.generate_key()
        if as_str:
            key = key.decode()
        return key

    @staticmethod
    def encrypt_symmetric(text: str or bytes, key: str) -> str:
        """
        Encrypt a text with a given symmetric key.

        Args:
            text (str): The text to encrypt.
            key (str): The symmetric (Fernet) key.

        Returns:
            str: The encrypted token, or "Error encrypt" on failure.
        """
        if isinstance(text, str):
            text = text.encode()

        try:
            fernet = Fernet(key.encode())
            return fernet.encrypt(text).decode()
        except Exception as e:
            get_logger().error(f"Error encrypt_symmetric #{str(e)}#")
            return "Error encrypt"

    @staticmethod
    def decrypt_symmetric(encrypted_text: str, key: str, to_str=True, mute=False) -> str or bytes:
        """
        Decrypt a text with a given symmetric key.

        Args:
            encrypted_text (str): The token to decrypt.
            key (str): The symmetric (Fernet) key.
            to_str (bool): default true returns str if false returns bytes
            mute (bool): Currently unused; the muting error handler below is commented out.
        Returns:
            str: The decrypted text.
        """

        if isinstance(key, str):
            key = key.encode()

        #try:
        fernet = Fernet(key)
        text_b = fernet.decrypt(encrypted_text)
        if not to_str:
            return text_b
        return text_b.decode()
        # except Exception as e:
        #     get_logger().error(f"Error decrypt_symmetric {e}")
        #     if not mute:
        #         raise e
        #     if not to_str:
        #         return f"Error decoding".encode()
        #     return f"Error decoding"

    @staticmethod
    def generate_asymmetric_keys() -> (str, str):
        """
        Generate a public/private RSA key pair for asymmetric encryption.

        Returns:
            (str, str): A tuple of (public key PEM, private key PEM).
        """
        private_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048 * 3,
        )
        public_key = private_key.public_key()

        # Serialize both keys to PEM strings
        pem_private_key = private_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption()
        ).decode()

        pem_public_key = public_key.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        ).decode()

        return pem_public_key, pem_private_key

    @staticmethod
    def save_keys_to_files(public_key: str, private_key: str, directory: str = "keys") -> None:
        """
        Save the generated keys to separate files.
        The private key is encrypted with the device key before writing.

        Args:
            public_key (str): The public key in PEM format
            private_key (str): The private key in PEM format
            directory (str): The directory the keys are written to
        """
        # Create the directory if it does not exist
        os.makedirs(directory, exist_ok=True)

        # Fetch the device-specific key
        device_key = DEVICE_KEY()

        # Encrypt the private key with the device key
        encrypted_private_key = Code.encrypt_symmetric(private_key, device_key)

        # Write the public key as plain PEM
        public_key_path = os.path.join(directory, "public_key.pem")
        with open(public_key_path, "w") as f:
            f.write(public_key)

        # Write the encrypted private key
        private_key_path = os.path.join(directory, "private_key.pem")
        with open(private_key_path, "w") as f:
            f.write(encrypted_private_key)

        print("Saved keys in ", public_key_path)

    @staticmethod
    def load_keys_from_files(directory: str = "keys") -> (str, str):
        """
        Load the keys from their files.
        The private key is decrypted with the device key.

        Args:
            directory (str): The directory the keys are loaded from

        Returns:
            (str, str): A tuple of (public key, private key);
                ("", "") when either key file is missing.
        """
        # Paths of the key files
        public_key_path = os.path.join(directory, "public_key.pem")
        private_key_path = os.path.join(directory, "private_key.pem")

        # Missing files are not an error: signal "no keys" with empty strings
        if not os.path.exists(public_key_path) or not os.path.exists(private_key_path):
            return "", ""

        # Fetch the device-specific key
        device_key = DEVICE_KEY()

        # Load the public key (plain PEM)
        with open(public_key_path) as f:
            public_key = f.read()

        # Load and decrypt the private key
        with open(private_key_path) as f:
            encrypted_private_key = f.read()
            private_key = Code.decrypt_symmetric(encrypted_private_key, device_key)

        return public_key, private_key

    @staticmethod
    def encrypt_asymmetric(text: str, public_key_str: str) -> str:
        """
        Encrypt a text with a given public key (RSA/OAEP, SHA-512).

        Args:
            text (str): The text to encrypt.
            public_key_str (str): The public key as a string / in PEM format.

        Returns:
            str: Hex-encoded ciphertext, or "Invalid" on failure.
        """
        # try:
        #    public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
        #  except Exception as e:
        #     get_logger().error(f"Error encrypt_asymmetric {e}")
        try:
            public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
            encrypted = public_key.encrypt(
                text.encode(),
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA512()),
                    algorithm=hashes.SHA512(),
                    label=None
                )
            )
            return encrypted.hex()
        except Exception as e:
            get_logger().error(f"Error encrypt_asymmetric {e}")
            return "Invalid"

    @staticmethod
    def decrypt_asymmetric(encrypted_text_hex: str, private_key_str: str) -> str:
        """
        Decrypt a text with a given private key (RSA/OAEP, SHA-512).

        Args:
            encrypted_text_hex (str): The ciphertext as a hex string.
            private_key_str (str): The private key as a PEM string.

        Returns:
            str: The decrypted text, or "Invalid" on failure.
        """
        try:
            private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
            decrypted = private_key.decrypt(
                bytes.fromhex(encrypted_text_hex),
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA512()),
                    algorithm=hashes.SHA512(),
                    label=None
                )
            )
            return decrypted.decode()

        except Exception as e:
            get_logger().error(f"Error decrypt_asymmetric {e}")
        return "Invalid"

    @staticmethod
    def verify_signature(signature: str or bytes, message: str or bytes, public_key_str: str,
                         salt_length=padding.PSS.MAX_LENGTH) -> bool:
        # Verify an RSA-PSS/SHA-512 signature; returns False on any failure.
        if isinstance(signature, str):
            signature = signature.encode()
        if isinstance(message, str):
            message = message.encode()
        try:
            public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
            public_key.verify(
                signature=signature,
                data=message,
                padding=padding.PSS(
                    mgf=padding.MGF1(hashes.SHA512()),
                    salt_length=salt_length
                ),
                algorithm=hashes.SHA512()
            )
            return True
        except:
            pass
        return False

    @staticmethod
    def verify_signature_web_algo(signature: str or bytes, message: str or bytes, public_key_str: str,
                                  algo: int = -512) -> bool:
        # Verify an ECDSA signature (SHA-512 when algo == -512, otherwise SHA-256);
        # returns False on any failure.
        signature_algorithm = ECDSA(hashes.SHA512())
        if algo != -512:
            signature_algorithm = ECDSA(hashes.SHA256())

        if isinstance(signature, str):
            signature = signature.encode()
        if isinstance(message, str):
            message = message.encode()
        try:
            public_key = serialization.load_pem_public_key(public_key_str.encode())
            public_key.verify(
                signature=signature,
                data=message,
                # padding=padding.PSS(
                #    mgf=padding.MGF1(hashes.SHA512()),
                #    salt_length=padding.PSS.MAX_LENGTH
                # ),
                signature_algorithm=signature_algorithm
            )
            return True
        except:
            pass
        return False

    @staticmethod
    def create_signature(message: str, private_key_str: str, salt_length=padding.PSS.MAX_LENGTH,
                         row=False) -> str or bytes:
        # Sign *message* with RSA-PSS/SHA-512. Returns raw signature bytes when
        # row=True, otherwise a base64 string; "Invalid Key" on failure.
        try:
            private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
            signature = private_key.sign(
                message.encode(),
                padding.PSS(
                    mgf=padding.MGF1(hashes.SHA512()),
                    salt_length=salt_length
                ),
                hashes.SHA512()
            )
            if row:
                return signature
            return base64.b64encode(signature).decode()
        except Exception as e:
            get_logger().error(f"Error create_signature {e}")
            print(e)
        return "Invalid Key"

    @staticmethod
    def pem_to_public_key(pem_key: str):
        """
        Convert a PEM-encoded public key into a PublicKey object.

        Args:
            pem_key (str): The PEM-encoded public key.

        Returns:
            PublicKey: The PublicKey object.
        """
        public_key = serialization.load_pem_public_key(pem_key.encode())
        return public_key

    @staticmethod
    def public_key_to_pem(public_key: RSAPublicKey):
        """
        Convert a PublicKey object into a PEM-encoded string.

        Args:
            public_key (PublicKey): The PublicKey object.

        Returns:
            str: The PEM-encoded public key.
        """
        pem = public_key.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        )
        return pem.decode()
decrypt_asymmetric(encrypted_text_hex, private_key_str) staticmethod

Entschlüsselt einen Text mit einem gegebenen privaten Schlüssel.

Parameters:

Name Type Description Default
encrypted_text_hex str

Der verschlüsselte Text als Hex-String.

required
private_key_str str

Der private Schlüssel als String.

required

Returns:

Name Type Description
str str

Der entschlüsselte Text.

Source code in toolboxv2/utils/security/cryp.py
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
@staticmethod
def decrypt_asymmetric(encrypted_text_hex: str, private_key_str: str) -> str:
    """
    Decrypt a hex-encoded RSA/OAEP (SHA-512) ciphertext with a PEM private key.

    Args:
        encrypted_text_hex (str): Ciphertext as a hex string.
        private_key_str (str): Private key as a PEM string.

    Returns:
        str: The decrypted text, or "Invalid" on any failure.
    """
    oaep = padding.OAEP(
        mgf=padding.MGF1(algorithm=hashes.SHA512()),
        algorithm=hashes.SHA512(),
        label=None
    )
    try:
        key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
        plaintext = key.decrypt(bytes.fromhex(encrypted_text_hex), oaep)
        return plaintext.decode()
    except Exception as e:
        get_logger().error(f"Error decrypt_asymmetric {e}")
    return "Invalid"
decrypt_symmetric(encrypted_text, key, to_str=True, mute=False) staticmethod

Entschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

Parameters:

Name Type Description Default
encrypted_text str

Der zu entschlüsselnde Text.

required
key str

Der symmetrische Schlüssel.

required
to_str bool

default true returns str if false returns bytes

True

Returns: str: Der entschlüsselte Text.

Source code in toolboxv2/utils/security/cryp.py
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
@staticmethod
def decrypt_symmetric(encrypted_text: str, key: str, to_str=True, mute=False) -> str or bytes:
    """
    Decrypt a Fernet token with the given symmetric key.

    Args:
        encrypted_text (str): Token to decrypt.
        key (str): Symmetric (Fernet) key.
        to_str (bool): default true returns str if false returns bytes
        mute (bool): Unused; kept for interface compatibility.
    Returns:
        str: The decrypted text.
    """
    key_bytes = key.encode() if isinstance(key, str) else key

    decrypted = Fernet(key_bytes).decrypt(encrypted_text)
    return decrypted.decode() if to_str else decrypted
encrypt_asymmetric(text, public_key_str) staticmethod

Verschlüsselt einen Text mit einem gegebenen öffentlichen Schlüssel.

Parameters:

Name Type Description Default
text str

Der zu verschlüsselnde Text.

required
public_key_str str

Der öffentliche Schlüssel als String oder im pem format.

required

Returns:

Name Type Description
str str

Der verschlüsselte Text.

Source code in toolboxv2/utils/security/cryp.py
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
@staticmethod
def encrypt_asymmetric(text: str, public_key_str: str) -> str:
    """
    Encrypt *text* with a PEM public key using RSA/OAEP (SHA-512).

    Args:
        text (str): Text to encrypt.
        public_key_str (str): Public key as a string / in PEM format.

    Returns:
        str: Hex-encoded ciphertext, or "Invalid" on failure.
    """
    try:
        public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
        ciphertext = public_key.encrypt(
            text.encode(),
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.SHA512()),
                algorithm=hashes.SHA512(),
                label=None
            )
        )
        return ciphertext.hex()
    except Exception as e:
        get_logger().error(f"Error encrypt_asymmetric {e}")
        return "Invalid"
encrypt_symmetric(text, key) staticmethod

Verschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

Parameters:

Name Type Description Default
text str

Der zu verschlüsselnde Text.

required
key str

Der symmetrische Schlüssel.

required

Returns:

Name Type Description
str str

Der verschlüsselte Text.

Source code in toolboxv2/utils/security/cryp.py
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
@staticmethod
def encrypt_symmetric(text: str or bytes, key: str) -> str:
    """
    Encrypt a payload with the given Fernet key.

    Args:
        text (str | bytes): Payload to encrypt.
        key (str): Symmetric (Fernet) key.

    Returns:
        str: Encrypted token, or "Error encrypt" on failure.
    """
    payload = text.encode() if isinstance(text, str) else text

    try:
        token = Fernet(key.encode()).encrypt(payload)
        return token.decode()
    except Exception as e:
        get_logger().error(f"Error encrypt_symmetric #{str(e)}#")
        return "Error encrypt"
generate_asymmetric_keys() staticmethod

Generiert ein Paar von öffentlichen und privaten Schlüsseln für die asymmetrische Verschlüsselung.

Parameters:

Name Type Description Default
seed int

An optional seed value (default None). Note: stale documentation — the current signature takes no seed parameter.

required

Returns:

Type Description
(str, str)

Ein Tupel aus öffentlichem und privatem Schlüssel.

Source code in toolboxv2/utils/security/cryp.py
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
@staticmethod
def generate_asymmetric_keys() -> (str, str):
    """
    Generate a fresh RSA key pair for asymmetric encryption.

    Returns:
        (str, str): Tuple of (public key PEM, private key PEM).
    """
    # 6144-bit RSA key (2048 * 3)
    key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048 * 3,
    )

    pem_private_key = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption()
    ).decode()

    pem_public_key = key.public_key().public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo
    ).decode()

    return pem_public_key, pem_private_key
generate_seed() staticmethod

Erzeugt eine zufällige Zahl als Seed.

Returns:

Name Type Description
int int

Eine zufällige Zahl.

Source code in toolboxv2/utils/security/cryp.py
101
102
103
104
105
106
107
108
109
@staticmethod
def generate_seed() -> int:
    """
    Erzeugt eine zufällige Zahl als Seed.

    Returns:
        int: Eine zufällige Zahl.
    """
    return random.randint(2 ** 32 - 1, 2 ** 64 - 1)
generate_symmetric_key(as_str=True) staticmethod

Generiert einen Schlüssel für die symmetrische Verschlüsselung.

Returns:

Name Type Description
str str or bytes

Der generierte Schlüssel.

Source code in toolboxv2/utils/security/cryp.py
127
128
129
130
131
132
133
134
135
136
137
138
@staticmethod
def generate_symmetric_key(as_str=True) -> str or bytes:
    """
    Generate a new Fernet key for symmetric encryption.

    Args:
        as_str (bool): Decode the key to str (default) instead of bytes.

    Returns:
        str: The generated key.
    """
    new_key = Fernet.generate_key()
    return new_key.decode() if as_str else new_key
load_keys_from_files(directory='keys') staticmethod

Lädt die Schlüssel aus den Dateien. Der private Schlüssel wird mit dem Device Key entschlüsselt.

Parameters:

Name Type Description Default
directory str

Das Verzeichnis, aus dem die Schlüssel geladen werden sollen

'keys'

Returns:

Type Description
(str, str)

Ein Tupel aus öffentlichem und privatem Schlüssel

Raises:

Type Description
FileNotFoundError

Documented as raised when the key files cannot be found — note that the current implementation returns ("", "") instead of raising.

Source code in toolboxv2/utils/security/cryp.py
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
@staticmethod
def load_keys_from_files(directory: str = "keys") -> (str, str):
    """
    Load the RSA key pair from files in *directory*.

    The private key file is stored encrypted and is decrypted here
    with the device key.

    Args:
        directory (str): Directory the key files are read from.

    Returns:
        (str, str): Tuple of (public key PEM, decrypted private key PEM).
            Returns ("", "") when either key file is missing.
    """
    # Paths of the two key files
    public_key_path = os.path.join(directory, "public_key.pem")
    private_key_path = os.path.join(directory, "private_key.pem")

    # Missing files are not an error: signal "no keys" with empty strings
    if not os.path.exists(public_key_path) or not os.path.exists(private_key_path):
        return "", ""

    # Device-specific key used to decrypt the private key file
    device_key = DEVICE_KEY()

    # Public key is stored as plain PEM text
    with open(public_key_path) as f:
        public_key = f.read()

    # Private key file holds an encrypted PEM; decrypt with the device key
    with open(private_key_path) as f:
        encrypted_private_key = f.read()
        private_key = Code.decrypt_symmetric(encrypted_private_key, device_key)

    return public_key, private_key
one_way_hash(text, salt='', pepper='') staticmethod

Erzeugt einen Hash eines gegebenen Textes mit Salt, Pepper und optional einem Seed.

Parameters:

Name Type Description Default
text str

Der zu hashende Text.

required
salt str

Der Salt-Wert.

''
pepper str

Der Pepper-Wert.

''
seed int

Ein optionaler Seed-Wert. Standardmäßig None.

required

Returns:

Name Type Description
str str

Der resultierende Hash-Wert.

Source code in toolboxv2/utils/security/cryp.py
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
@staticmethod
def one_way_hash(text: str, salt: str = '', pepper: str = '') -> str:
    """
    Erzeugt einen Hash eines gegebenen Textes mit Salt, Pepper und optional einem Seed.

    Args:
        text (str): Der zu hashende Text.
        salt (str): Der Salt-Wert.
        pepper (str): Der Pepper-Wert.
        seed (int, optional): Ein optionaler Seed-Wert. Standardmäßig None.

    Returns:
        str: Der resultierende Hash-Wert.
    """
    return hashlib.sha256((salt + text + pepper).encode()).hexdigest()
pem_to_public_key(pem_key) staticmethod

Konvertiert einen PEM-kodierten öffentlichen Schlüssel in ein PublicKey-Objekt.

Parameters:

Name Type Description Default
pem_key str

Der PEM-kodierte öffentliche Schlüssel.

required

Returns:

Name Type Description
PublicKey

Das PublicKey-Objekt.

Source code in toolboxv2/utils/security/cryp.py
422
423
424
425
426
427
428
429
430
431
432
433
434
@staticmethod
def pem_to_public_key(pem_key: str):
    """
    Deserialize a PEM-encoded public key string into a key object.

    Args:
        pem_key (str): PEM-encoded public key.

    Returns:
        PublicKey: The deserialized public key object.
    """
    return serialization.load_pem_public_key(pem_key.encode())
public_key_to_pem(public_key) staticmethod

Konvertiert ein PublicKey-Objekt in einen PEM-kodierten String.

Parameters:

Name Type Description Default
public_key PublicKey

Das PublicKey-Objekt.

required

Returns:

Name Type Description
str

Der PEM-kodierte öffentliche Schlüssel.

Source code in toolboxv2/utils/security/cryp.py
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
@staticmethod
def public_key_to_pem(public_key: RSAPublicKey):
    """
    Serialize a public key object into a PEM string.

    Args:
        public_key (PublicKey): Key object to serialize.

    Returns:
        str: PEM-encoded public key.
    """
    pem_bytes = public_key.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo
    )
    return pem_bytes.decode()
save_keys_to_files(public_key, private_key, directory='keys') staticmethod

Speichert die generierten Schlüssel in separate Dateien. Der private Schlüssel wird mit dem Device Key verschlüsselt.

Parameters:

Name Type Description Default
public_key str

Der öffentliche Schlüssel im PEM-Format

required
private_key str

Der private Schlüssel im PEM-Format

required
directory str

Das Verzeichnis, in dem die Schlüssel gespeichert werden sollen

'keys'
Source code in toolboxv2/utils/security/cryp.py
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
@staticmethod
def save_keys_to_files(public_key: str, private_key: str, directory: str = "keys") -> None:
    """
    Write the key pair to ``public_key.pem`` / ``private_key.pem`` in *directory*.

    The private key is encrypted with the device key before being written;
    the public key is stored as plain PEM text.

    Args:
        public_key (str): Public key in PEM format.
        private_key (str): Private key in PEM format.
        directory (str): Target directory; created if it does not exist.
    """
    os.makedirs(directory, exist_ok=True)

    # Encrypt the private key with the device-specific key before writing
    device_key = DEVICE_KEY()
    encrypted_private_key = Code.encrypt_symmetric(private_key, device_key)

    public_key_path = os.path.join(directory, "public_key.pem")
    private_key_path = os.path.join(directory, "private_key.pem")

    with open(public_key_path, "w") as f:
        f.write(public_key)

    with open(private_key_path, "w") as f:
        f.write(encrypted_private_key)

    print("Saved keys in ", public_key_path)

singelton_class

Singleton

Singleton metaclass for ensuring only one instance of a class.

Source code in toolboxv2/utils/singelton_class.py
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
class Singleton(type):
    """
    Metaclass that guarantees at most one instance per class.

    The first instantiation of a class using this metaclass is cached,
    together with the positional and keyword arguments it was created
    with; every later call returns the cached instance unchanged.
    """

    _instances = {}
    _kwargs = {}
    _args = {}

    def __call__(cls, *args, **kwargs):
        if cls not in Singleton._instances:
            # First construction: build, then remember instance + ctor args.
            instance = super().__call__(*args, **kwargs)
            Singleton._instances[cls] = instance
            Singleton._args[cls] = args
            Singleton._kwargs[cls] = kwargs
        return Singleton._instances[cls]

system

AppType
Source code in toolboxv2/utils/system/types.py
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
class AppType:
    prefix: str
    id: str
    globals: dict[str, Any] = {"root": dict, }
    locals: dict[str, Any] = {"user": {'app': "self"}, }

    local_test: bool = False
    start_dir: str
    data_dir: str
    config_dir: str
    info_dir: str
    is_server:bool = False

    logger: logging.Logger
    logging_filename: str

    api_allowed_mods_list: list[str] = []

    version: str
    loop: asyncio.AbstractEventLoop

    keys: dict[str, str] = {
        "MACRO": "macro~~~~:",
        "MACRO_C": "m_color~~:",
        "HELPER": "helper~~~:",
        "debug": "debug~~~~:",
        "id": "name-spa~:",
        "st-load": "mute~load:",
        "comm-his": "comm-his~:",
        "develop-mode": "dev~mode~:",
        "provider::": "provider::",
    }

    defaults: dict[str, (bool or dict or dict[str, dict[str, str]] or str or list[str] or list[list]) | None] = {
        "MACRO": list[str],
        "MACRO_C": dict,
        "HELPER": dict,
        "debug": str,
        "id": str,
        "st-load": False,
        "comm-his": list[list],
        "develop-mode": bool,
    }

    cluster_manager: ClusterManager
    root_blob_storage: BlobStorage
    config_fh: FileHandler
    _debug: bool
    flows: dict[str, Callable]
    dev_modi: bool
    functions: dict[str, Any]
    modules: dict[str, Any]

    interface_type: ToolBoxInterfaces
    REFIX: str

    alive: bool
    called_exit: tuple[bool, float]
    args_sto: AppArgs
    system_flag = None
    session = None
    appdata = None
    exit_tasks = []

    enable_profiling: bool = False
    sto = None

    websocket_handlers: dict[str, dict[str, Callable]] = {}
    _rust_ws_bridge: Any = None

    def __init__(self, prefix: None | str= None, args: AppArgs | None = None):
        self.args_sto = args
        self.prefix = prefix
        """proxi attr"""

    def start_server(self):
        """Start the backing API server, at most once per process.

        Uses `is_server` as a guard so repeated calls are no-ops.
        """
        from toolboxv2.utils.system.api import manage_server
        if self.is_server:
            return
        manage_server("start")
        # Bugfix: this was `False`, which defeated the guard above and
        # allowed the server to be started again on every call.
        self.is_server = True

    @staticmethod
    def exit_main(*args, **kwargs):
        """proxi attr"""

    @staticmethod
    async def hide_console(*args, **kwargs):
        """proxi attr"""

    @staticmethod
    async def show_console(*args, **kwargs):
        """proxi attr"""

    @staticmethod
    async def disconnect(*args, **kwargs):
        """proxi attr"""

    def set_logger(self, debug=False):
        """proxi attr"""

    @property
    def debug(self):
        """proxi attr"""
        return self._debug

    def debug_rains(self, e):
        """proxi attr"""

    def set_flows(self, r):
        """proxi attr"""

    def run_flows(self, name, **kwargs):
        """proxi attr"""

    def rrun_flows(self, name, **kwargs):
        """proxi attr"""

    def idle(self):
        """Block the calling thread, polling once per second, until
        `self.alive` turns falsy or the user interrupts with Ctrl-C."""
        from time import sleep
        self.print("idle")
        try:
            while self.alive:
                sleep(1)
        except KeyboardInterrupt:
            pass
        self.print("idle done")

    async def a_idle(self):
        """Async counterpart of `idle`.

        Serves the attached daemon app when one exists; otherwise
        sleep-loops until `self.alive` turns falsy.
        """
        self.print("a idle")
        try:
            if not hasattr(self, 'daemon_app'):
                self.print("serving default")
                while self.alive:
                    await asyncio.sleep(1)
            else:
                self.print("serving daemon")
                await self.daemon_app.connect(self)
        except KeyboardInterrupt:
            pass
        self.print("a idle done")

    @debug.setter
    def debug(self, value):
        """proxi attr"""

    def _coppy_mod(self, content, new_mod_dir, mod_name, file_type='py'):
        """proxi attr"""

    def _pre_lib_mod(self, mod_name, path_to="./runtime", file_type='py'):
        """proxi attr"""

    def _copy_load(self, mod_name, file_type='py', **kwargs):
        """proxi attr"""

    def inplace_load_instance(self, mod_name, loc="toolboxv2.mods.", spec='app', save=True):
        """proxi attr"""

    def save_instance(self, instance, modular_id, spec='app', instance_type="file/application", tools_class=None):
        """proxi attr"""

    def save_initialized_module(self, tools_class, spec):
        """proxi attr"""

    def mod_online(self, mod_name, installed=False):
        """proxi attr"""

    def _get_function(self,
                      name: Enum or None,
                      state: bool = True,
                      specification: str = "app",
                      metadata=False, as_str: tuple or None = None, r=0):
        """proxi attr"""

    def save_exit(self):
        """proxi attr"""

    def load_mod(self, mod_name: str, mlm='I', **kwargs):
        """proxi attr"""

    async def init_module(self, modular):
        return await self.load_mod(modular)

    async def load_external_mods(self):
        """proxi attr"""

    async def load_all_mods_in_file(self, working_dir="mods"):
        """proxi attr"""

    def get_all_mods(self, working_dir="mods", path_to="./runtime"):
        """proxi attr"""

    def remove_all_modules(self, delete=False):
        """Close every registered module, optionally deleting it."""
        for module_name in list(self.functions):
            self.logger.info(f"closing: {module_name}")
            self.remove_mod(module_name, delete=delete)

    async def a_remove_all_modules(self, delete=False):
        """Async variant: close every registered module, optionally deleting it."""
        for module_name in list(self.functions):
            self.logger.info(f"closing: {module_name}")
            await self.a_remove_mod(module_name, delete=delete)

    def print_ok(self):
        """proxi attr"""
        self.logger.info("OK")

    def reload_mod(self, mod_name, spec='app', is_file=True, loc="toolboxv2.mods."):
        """proxi attr"""

    def watch_mod(self, mod_name, spec='app', loc="toolboxv2.mods.", use_thread=True, path_name=None):
        """proxi attr"""

    def remove_mod(self, mod_name, spec='app', delete=True):
        """proxi attr"""

    async def a_remove_mod(self, mod_name, spec='app', delete=True):
        """proxi attr"""

    def exit(self):
        """proxi attr"""

    def web_context(self) -> str:
        """returns the build index ( toolbox web component )"""

    async def a_exit(self):
        """proxi attr"""

    def save_load(self, modname, spec='app'):
        """proxi attr"""

    def get_function(self, name: Enum or tuple, **kwargs):
        """
        Kwargs for _get_function
            metadata:: return the registered function dictionary
                stateless: (function_data, None), 0
                stateful: (function_data, higher_order_function), 0
            state::boolean
                specification::str default app
        """

    def run_a_from_sync(self, function, *args):
        """
        run a async fuction
        """

    def run_bg_task_advanced(self, task, *args, **kwargs):
        """
        proxi attr
        """

    def wait_for_bg_tasks(self, timeout=None):
        """
        proxi attr
        """

    def run_bg_task(self, task):
        """
                run a async fuction
                """
    def run_function(self, mod_function_name: Enum or tuple,
                     tb_run_function_with_state=True,
                     tb_run_with_specification='app',
                     args_=None,
                     kwargs_=None,
                     *args,
                     **kwargs) -> Result:

        """proxi attr"""

    async def a_run_function(self, mod_function_name: Enum or tuple,
                             tb_run_function_with_state=True,
                             tb_run_with_specification='app',
                             args_=None,
                             kwargs_=None,
                             *args,
                             **kwargs) -> Result:

        """proxi attr"""

    def fuction_runner(self, function, function_data: dict, args: list, kwargs: dict, t0=.0):
        """
        parameters = function_data.get('params')
        modular_name = function_data.get('module_name')
        function_name = function_data.get('func_name')
        mod_function_name = f"{modular_name}.{function_name}"

        proxi attr
        """

    async def a_fuction_runner(self, function, function_data: dict, args: list, kwargs: dict):
        """
        parameters = function_data.get('params')
        modular_name = function_data.get('module_name')
        function_name = function_data.get('func_name')
        mod_function_name = f"{modular_name}.{function_name}"

        proxi attr
        """

    async def run_http(self, mod_function_name: Enum or str or tuple, function_name=None, method="GET",
                       args_=None,
                       kwargs_=None,
                       *args, **kwargs):
        """run a function remote via http / https"""

    def run_any(self, mod_function_name: Enum or str or tuple, backwords_compability_variabel_string_holder=None,
                get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                kwargs_=None,
                *args, **kwargs):
        """proxi attr"""

    async def a_run_any(self, mod_function_name: Enum or str or tuple,
                        backwords_compability_variabel_string_holder=None,
                        get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                        kwargs_=None,
                        *args, **kwargs):
        """proxi attr"""

    def get_mod(self, name, spec='app') -> ModuleType or MainToolType:
        """proxi attr"""

    @staticmethod
    def print(text, *args, **kwargs):
        """proxi attr"""

    @staticmethod
    def sprint(text, *args, **kwargs):
        """proxi attr"""

    # ----------------------------------------------------------------
    # Decorators for the toolbox

    def _register_function(self, module_name, func_name, data):
        """proxi attr"""

    def _create_decorator(self, type_: str,
                          name: str = "",
                          mod_name: str = "",
                          level: int = -1,
                          restrict_in_virtual_mode: bool = False,
                          api: bool = False,
                          helper: str = "",
                          version: str or None = None,
                          initial=False,
                          exit_f=False,
                          test=True,
                          samples=None,
                          state=None,
                          pre_compute=None,
                          post_compute=None,
                          memory_cache=False,
                          file_cache=False,
                          row=False,
                          request_as_kwarg=False,
                          memory_cache_max_size=100,
                          memory_cache_ttl=300,
                          websocket_handler: str | None = None,):
        """proxi attr"""

        # data = {
        #     "type": type_,
        #     "module_name": module_name,
        #     "func_name": func_name,
        #     "level": level,
        #     "restrict_in_virtual_mode": restrict_in_virtual_mode,
        #     "func": func,
        #     "api": api,
        #     "helper": helper,
        #     "version": version,
        #     "initial": initial,
        #     "exit_f": exit_f,
        #     "__module__": func.__module__,
        #     "signature": sig,
        #     "params": params,
        #     "state": (
        #         False if len(params) == 0 else params[0] in ['self', 'state', 'app']) if state is None else state,
        #     "do_test": test,
        #     "samples": samples,
        #     "request_as_kwarg": request_as_kwarg,

    def tb(self, name=None,
           mod_name: str = "",
           helper: str = "",
           version: str or None = None,
           test: bool = True,
           restrict_in_virtual_mode: bool = False,
           api: bool = False,
           initial: bool = False,
           exit_f: bool = False,
           test_only: bool = False,
           memory_cache: bool = False,
           file_cache: bool = False,
           row=False,
           request_as_kwarg: bool = False,
           state: bool or None = None,
           level: int = 0,
           memory_cache_max_size: int = 100,
           memory_cache_ttl: int = 300,
           samples: list or dict or None = None,
           interface: ToolBoxInterfaces or None or str = None,
           pre_compute=None,
           post_compute=None,
           api_methods=None,
           websocket_handler: str | None = None,
           ):
        """
    A decorator for registering and configuring functions within a module.

    This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

    Args:
        name (str, optional): The name to register the function under. Defaults to the function's own name.
        mod_name (str, optional): The name of the module the function belongs to.
        helper (str, optional): A helper string providing additional information about the function.
        version (str or None, optional): The version of the function or module.
        test (bool, optional): Flag to indicate if the function is for testing purposes.
        restrict_in_virtual_mode (bool, optional): Flag to restrict the function in virtual mode.
        api (bool, optional): Flag to indicate if the function is part of an API.
        initial (bool, optional): Flag to indicate if the function should be executed at initialization.
        exit_f (bool, optional): Flag to indicate if the function should be executed at exit.
        test_only (bool, optional): Flag to indicate if the function should only be used for testing.
        memory_cache (bool, optional): Flag to enable memory caching for the function.
        request_as_kwarg (bool, optional): Flag to pass the request object as kwarg when called via the API.
        file_cache (bool, optional): Flag to enable file caching for the function.
        row (bool, optional): Whether to skip auto-wrapping the result in the Result type; default False means results are wrapped.
        state (bool or None, optional): Flag to indicate if the function maintains state.
        level (int, optional): The level of the function, used for prioritization or categorization.
        memory_cache_max_size (int, optional): Maximum size of the memory cache.
        memory_cache_ttl (int, optional): Time-to-live for the memory cache entries.
        samples (list or dict or None, optional): Samples or examples of function usage.
        interface (str, optional): The interface type for the function.
        pre_compute (callable, optional): A function to be called before the main function.
        post_compute (callable, optional): A function to be called after the main function.
        api_methods (list[str], optional): default ["AUTO"] (GET if not params, POST if params), GET, POST, PUT or DELETE.
        websocket_handler (str or None, optional): Name of the websocket handler group to register the function under.

    Returns:
        function: The decorated function with additional processing and registration capabilities.
    """
        if interface is None:
            interface = "tb"
        # In test-only mode outside a test app id, return a no-op decorator.
        if test_only and 'test' not in self.id:
            return lambda *args, **kwargs: args
        # NOTE(review): `api_methods` is accepted but `_create_decorator`
        # has no matching parameter, so it is currently unused here.
        return self._create_decorator(interface,
                                      name,
                                      mod_name,
                                      level=level,
                                      restrict_in_virtual_mode=restrict_in_virtual_mode,
                                      helper=helper,
                                      api=api,
                                      version=version,
                                      initial=initial,
                                      exit_f=exit_f,
                                      test=test,
                                      samples=samples,
                                      state=state,
                                      pre_compute=pre_compute,
                                      post_compute=post_compute,
                                      memory_cache=memory_cache,
                                      file_cache=file_cache,
                                      row=row,
                                      request_as_kwarg=request_as_kwarg,
                                      memory_cache_max_size=memory_cache_max_size,
                                      memory_cache_ttl=memory_cache_ttl,
                                      # Bugfix: was accepted but silently
                                      # dropped; forward it so websocket
                                      # handlers actually register.
                                      websocket_handler=websocket_handler)

    def print_functions(self, name=None):
        """Pretty-print the registered functions, optionally for one module.

        Args:
            name (str, optional): Module to print. If the module is not
                registered, all modules are printed as a fallback.
        """
        if not self.functions:
            print("Nothing to see")
            return

        def helper(_functions):
            # Print one line per registered function entry; non-dict
            # entries (module metadata) are skipped.
            for func_name, data in _functions.items():
                if not isinstance(data, dict):
                    continue

                func_type = data.get('type', 'Unknown')
                # Bugfix/consistency: use .get like every other lookup here
                # so a missing 'level' key no longer raises KeyError.
                level = data.get('level', 0)
                func_level = 'r' if level == -1 else level
                api_status = 'Api' if data.get('api', False) else 'Non-Api'

                print(f"  Function: {func_name}{data.get('signature', '()')}; "
                      f"Type: {func_type}, Level: {func_level}, {api_status}")

        if name is not None:
            functions = self.functions.get(name)
            if functions is not None:
                print(f"\nModule: {name}; Type: {functions.get('app_instance_type', 'Unknown')}")
                helper(functions)
                return
        # NOTE(review): reaching here with an unknown `name` prints every
        # module — confirm this fallback is intended.
        for module, functions in self.functions.items():
            print(f"\nModule: {module}; Type: {functions.get('app_instance_type', 'Unknown')}")
            helper(functions)

    def save_autocompletion_dict(self):
        """proxi attr"""

    def get_autocompletion_dict(self):
        """proxi attr"""

    def get_username(self, get_input=False, default="loot") -> str:
        """proxi attr"""

    def save_registry_as_enums(self, directory: str, filename: str):
        """proxi attr"""

    async def execute_all_functions_(self, m_query='', f_query=''):
        print("Executing all functions")
        from ..extras import generate_test_cases
        all_data = {
            "modular_run": 0,
            "modular_fatal_error": 0,
            "errors": 0,
            "modular_sug": 0,
            "coverage": [],
            "total_coverage": {},
        }
        items = list(self.functions.items()).copy()
        for module_name, functions in items:
            infos = {
                "functions_run": 0,
                "functions_fatal_error": 0,
                "error": 0,
                "functions_sug": 0,
                'calls': {},
                'callse': {},
                "coverage": [0, 0],
            }
            all_data['modular_run'] += 1
            if not module_name.startswith(m_query):
                all_data['modular_sug'] += 1
                continue

            with Spinner(message=f"In {module_name}| "):
                f_items = list(functions.items()).copy()
                for function_name, function_data in f_items:
                    if not isinstance(function_data, dict):
                        continue
                    if not function_name.startswith(f_query):
                        continue
                    test: list = function_data.get('do_test')
                    # print(test, module_name, function_name, function_data)
                    infos["coverage"][0] += 1
                    if test is False:
                        continue

                    with Spinner(message=f"\t\t\t\t\t\tfuction {function_name}..."):
                        params: list = function_data.get('params')
                        sig: signature = function_data.get('signature')
                        state: bool = function_data.get('state')
                        samples: bool = function_data.get('samples')

                        test_kwargs_list = [{}]

                        if params is not None:
                            test_kwargs_list = samples if samples is not None else generate_test_cases(sig=sig)
                            # print(test_kwargs)
                            # print(test_kwargs[0])
                            # test_kwargs = test_kwargs_list[0]
                        # print(module_name, function_name, test_kwargs_list)
                        infos["coverage"][1] += 1
                        for test_kwargs in test_kwargs_list:
                            try:
                                # print(f"test Running {state=} |{module_name}.{function_name}")
                                result = await self.a_run_function((module_name, function_name),
                                                                   tb_run_function_with_state=state,
                                                                   **test_kwargs)
                                if not isinstance(result, Result):
                                    result = Result.ok(result)
                                if result.info.exec_code == 0:
                                    infos['calls'][function_name] = [test_kwargs, str(result)]
                                    infos['functions_sug'] += 1
                                else:
                                    infos['functions_sug'] += 1
                                    infos['error'] += 1
                                    infos['callse'][function_name] = [test_kwargs, str(result)]
                            except Exception as e:
                                infos['functions_fatal_error'] += 1
                                infos['callse'][function_name] = [test_kwargs, str(e)]
                            finally:
                                infos['functions_run'] += 1

                if infos['functions_run'] == infos['functions_sug']:
                    all_data['modular_sug'] += 1
                else:
                    all_data['modular_fatal_error'] += 1
                if infos['error'] > 0:
                    all_data['errors'] += infos['error']

                all_data[module_name] = infos
                if infos['coverage'][0] == 0:
                    c = 0
                else:
                    c = infos['coverage'][1] / infos['coverage'][0]
                all_data["coverage"].append(f"{module_name}:{c:.2f}\n")
        total_coverage = sum([float(t.split(":")[-1]) for t in all_data["coverage"]]) / len(all_data["coverage"])
        print(
            f"\n{all_data['modular_run']=}\n{all_data['modular_sug']=}\n{all_data['modular_fatal_error']=}\n{total_coverage=}")
        d = analyze_data(all_data)
        return Result.ok(data=all_data, data_info=d)

    async def execute_function_test(self, module_name: str, function_name: str,
                                    function_data: dict, test_kwargs: dict,
                                    profiler: cProfile.Profile) -> tuple[bool, str, dict, float]:
        """Run one registered function with a single set of test kwargs.

        Returns:
            tuple[bool, str, dict, float]: (success flag, stringified
            result or error, the kwargs used, wall-clock time in seconds).
        """
        start_time = time.time()
        # Profiling only activates when this instance opted in via
        # `enable_profiling`.
        with profile_section(profiler, hasattr(self, 'enable_profiling') and self.enable_profiling):
            try:
                result = await self.a_run_function(
                    (module_name, function_name),
                    tb_run_function_with_state=function_data.get('state'),
                    **test_kwargs
                )

                # Normalize plain return values into the Result wrapper.
                if not isinstance(result, Result):
                    result = Result.ok(result)

                # exec_code == 0 is the success convention for Result.
                success = result.info.exec_code == 0
                execution_time = time.time() - start_time
                return success, str(result), test_kwargs, execution_time
            except Exception as e:
                # Any raised exception is reported as a failed test case
                # rather than propagated.
                execution_time = time.time() - start_time
                return False, str(e), test_kwargs, execution_time

    async def process_function(self, module_name: str, function_name: str,
                               function_data: dict, profiler: cProfile.Profile) -> tuple[str, ModuleInfo]:
        """Run every generated/sampled test case for one function concurrently.

        Returns:
            tuple[str, ModuleInfo]: the function name and its aggregated
            run/success/error tallies plus timing.
        """
        start_time = time.time()
        info = ModuleInfo()

        with profile_section(profiler, hasattr(self, 'enable_profiling') and self.enable_profiling):
            # Non-dict entries are module metadata, not functions.
            if not isinstance(function_data, dict):
                return function_name, info

            test = function_data.get('do_test')
            info.coverage[0] += 1

            # Functions explicitly opted out of testing are only counted
            # toward total coverage, not executed.
            if test is False:
                return function_name, info

            params = function_data.get('params')
            sig = function_data.get('signature')
            samples = function_data.get('samples')

            # Prefer author-provided samples; otherwise derive cases from
            # the signature. Parameterless functions get one empty call.
            test_kwargs_list = [{}] if params is None else (
                samples if samples is not None else generate_test_cases(sig=sig)
            )

            info.coverage[1] += 1

            # Create tasks for all test cases
            tasks = [
                self.execute_function_test(module_name, function_name, function_data, test_kwargs, profiler)
                for test_kwargs in test_kwargs_list
            ]

            # Execute all tests concurrently
            results = await asyncio.gather(*tasks)

            total_execution_time = 0
            for success, result_str, test_kwargs, execution_time in results:
                info.functions_run += 1
                total_execution_time += execution_time

                if success:
                    info.functions_sug += 1
                    info.calls[function_name] = [test_kwargs, result_str]
                else:
                    # NOTE(review): failures also bump `functions_sug`,
                    # mirroring execute_all_functions_; `functions_fatal_error`
                    # is never incremented here — confirm both are intended.
                    info.functions_sug += 1
                    info.error += 1
                    info.callse[function_name] = [test_kwargs, result_str]

            info.execution_time = time.time() - start_time
            return function_name, info

    async def process_module(self, module_name: str, functions: dict,
                             f_query: str, profiler: cProfile.Profile) -> tuple[str, ModuleInfo]:
        """Test all functions of one module matching `f_query` and merge tallies.

        Returns:
            tuple[str, ModuleInfo]: the module name and the combined
            per-function statistics.
        """
        start_time = time.time()

        with profile_section(profiler, hasattr(self, 'enable_profiling') and self.enable_profiling):
            # NOTE(review): this acquires the semaphore once around the
            # whole gather, so it does NOT limit per-task concurrency —
            # confirm whether each task was meant to acquire it instead.
            async with asyncio.Semaphore(mp.cpu_count()):
                tasks = [
                    self.process_function(module_name, fname, fdata, profiler)
                    for fname, fdata in functions.items()
                    if fname.startswith(f_query)
                ]

                if not tasks:
                    return module_name, ModuleInfo()

                results = await asyncio.gather(*tasks)

                # Combine results from all functions in the module
                combined_info = ModuleInfo()
                total_execution_time = 0

                for _, info in results:
                    combined_info.functions_run += info.functions_run
                    combined_info.functions_fatal_error += info.functions_fatal_error
                    combined_info.error += info.error
                    combined_info.functions_sug += info.functions_sug
                    combined_info.calls.update(info.calls)
                    combined_info.callse.update(info.callse)
                    combined_info.coverage[0] += info.coverage[0]
                    combined_info.coverage[1] += info.coverage[1]
                    total_execution_time += info.execution_time

                combined_info.execution_time = time.time() - start_time
                return module_name, combined_info

    async def execute_all_functions(self, m_query='', f_query='', enable_profiling=True):
        """
        Execute all registered functions with parallel processing and optional profiling.

        Args:
            m_query (str): Module name prefix filter
            f_query (str): Function name prefix filter
            enable_profiling (bool): Enable detailed profiling information

        Returns:
            Result: ok-result whose ``data`` is the stats ``__dict__`` and whose
            ``data_info`` is the output of ``analyze_data``.
        """
        print("Executing all functions in parallel" + (" with profiling" if enable_profiling else ""))

        start_time = time.time()
        stats = ExecutionStats()
        # list() already produces a snapshot; the extra .copy() was redundant.
        items = list(self.functions.items())

        # Set up profiling
        self.enable_profiling = enable_profiling
        profiler = cProfile.Profile()

        with profile_section(profiler, enable_profiling):
            # Filter modules based on the prefix query
            filtered_modules = [
                (mname, mfuncs) for mname, mfuncs in items
                if mname.startswith(m_query)
            ]

            stats.modular_run = len(filtered_modules)

            # BUGFIX: a Semaphore acquired once around the gather bounds
            # nothing; acquire it inside each module task so at most
            # cpu_count() modules are processed at a time.
            semaphore = asyncio.Semaphore(mp.cpu_count())

            async def _limited(mname, mfuncs):
                async with semaphore:
                    return await self.process_module(mname, mfuncs, f_query, profiler)

            tasks = [_limited(mname, mfuncs) for mname, mfuncs in filtered_modules]
            results = await asyncio.gather(*tasks)

            # Combine results and calculate statistics
            for module_name, info in results:
                if info.functions_run == info.functions_sug:
                    stats.modular_sug += 1
                else:
                    stats.modular_fatal_error += 1

                stats.errors += info.error

                # Per-module coverage: tested entries / discovered entries
                coverage = (info.coverage[1] / info.coverage[0]) if info.coverage[0] > 0 else 0
                stats.coverage.append(f"{module_name}:{coverage:.2f}\n")

                # Attach the per-module info directly onto the stats object
                stats.__dict__[module_name] = info

            # Mean coverage over all matched modules (0 when none matched)
            total_coverage = (
                sum(float(t.split(":")[-1]) for t in stats.coverage) / len(stats.coverage)
                if stats.coverage else 0
            )

            stats.total_execution_time = time.time() - start_time

            # Generate detailed profiling stats if enabled
            if enable_profiling:
                s = io.StringIO()
                ps = pstats.Stats(profiler, stream=s).sort_stats('cumulative')
                ps.print_stats()
                stats.profiling_data = {
                    'detailed_stats': s.getvalue(),
                    'total_time': stats.total_execution_time,
                    'function_count': stats.modular_run,
                    'successful_functions': stats.modular_sug
                }

            print(
                f"\n{stats.modular_run=}"
                f"\n{stats.modular_sug=}"
                f"\n{stats.modular_fatal_error=}"
                f"\n{total_coverage=}"
                f"\nTotal execution time: {stats.total_execution_time:.2f}s"
            )

            if enable_profiling:
                print("\nProfiling Summary:")
                print(f"{'=' * 50}")
                print("Top 10 time-consuming functions:")
                ps.print_stats(10)

            analyzed_data = analyze_data(stats.__dict__)
            return Result.ok(data=stats.__dict__, data_info=analyzed_data)
debug property writable

proxi attr

prefix = prefix instance-attribute

proxi attr

a_exit() async

proxi attr

Source code in toolboxv2/utils/system/types.py
1426
1427
async def a_exit(self):
    """proxi attr"""
a_fuction_runner(function, function_data, args, kwargs) async

parameters = function_data.get('params') modular_name = function_data.get('module_name') function_name = function_data.get('func_name') mod_function_name = f"{modular_name}.{function_name}"

proxi attr

Source code in toolboxv2/utils/system/types.py
1491
1492
1493
1494
1495
1496
1497
1498
1499
async def a_fuction_runner(self, function, function_data: dict, args: list, kwargs: dict):
    """
    parameters = function_data.get('params')
    modular_name = function_data.get('module_name')
    function_name = function_data.get('func_name')
    mod_function_name = f"{modular_name}.{function_name}"

    proxi attr
    """
a_remove_mod(mod_name, spec='app', delete=True) async

proxi attr

Source code in toolboxv2/utils/system/types.py
1417
1418
async def a_remove_mod(self, mod_name, spec='app', delete=True):
    """proxi attr"""
a_run_any(mod_function_name, backwords_compability_variabel_string_holder=None, get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs) async

proxi attr

Source code in toolboxv2/utils/system/types.py
1513
1514
1515
1516
1517
1518
async def a_run_any(self, mod_function_name: Enum or str or tuple,
                    backwords_compability_variabel_string_holder=None,
                    get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                    kwargs_=None,
                    *args, **kwargs):
    """proxi attr"""
a_run_function(mod_function_name, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs) async

proxi attr

Source code in toolboxv2/utils/system/types.py
1471
1472
1473
1474
1475
1476
1477
1478
1479
async def a_run_function(self, mod_function_name: Enum or tuple,
                         tb_run_function_with_state=True,
                         tb_run_with_specification='app',
                         args_=None,
                         kwargs_=None,
                         *args,
                         **kwargs) -> Result:

    """proxi attr"""
debug_rains(e)

proxi attr

Source code in toolboxv2/utils/system/types.py
1308
1309
def debug_rains(self, e):
    """proxi attr"""
disconnect(*args, **kwargs) async staticmethod

proxi attr

Source code in toolboxv2/utils/system/types.py
1296
1297
1298
@staticmethod
async def disconnect(*args, **kwargs):
    """proxi attr"""
execute_all_functions(m_query='', f_query='', enable_profiling=True) async

Execute all functions with parallel processing and optional profiling.

Parameters:

Name Type Description Default
m_query str

Module name query filter

''
f_query str

Function name query filter

''
enable_profiling bool

Enable detailed profiling information

True
Source code in toolboxv2/utils/system/types.py
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
async def execute_all_functions(self, m_query='', f_query='', enable_profiling=True):
    """
    Execute all registered functions with parallel processing and optional profiling.

    Args:
        m_query (str): Module name prefix filter
        f_query (str): Function name prefix filter
        enable_profiling (bool): Enable detailed profiling information

    Returns:
        Result: ok-result whose ``data`` is the stats ``__dict__`` and whose
        ``data_info`` is the output of ``analyze_data``.
    """
    print("Executing all functions in parallel" + (" with profiling" if enable_profiling else ""))

    start_time = time.time()
    stats = ExecutionStats()
    # list() already produces a snapshot; the extra .copy() was redundant.
    items = list(self.functions.items())

    # Set up profiling
    self.enable_profiling = enable_profiling
    profiler = cProfile.Profile()

    with profile_section(profiler, enable_profiling):
        # Filter modules based on the prefix query
        filtered_modules = [
            (mname, mfuncs) for mname, mfuncs in items
            if mname.startswith(m_query)
        ]

        stats.modular_run = len(filtered_modules)

        # BUGFIX: a Semaphore acquired once around the gather bounds nothing;
        # acquire it inside each module task so at most cpu_count() modules
        # are processed at a time.
        semaphore = asyncio.Semaphore(mp.cpu_count())

        async def _limited(mname, mfuncs):
            async with semaphore:
                return await self.process_module(mname, mfuncs, f_query, profiler)

        tasks = [_limited(mname, mfuncs) for mname, mfuncs in filtered_modules]
        results = await asyncio.gather(*tasks)

        # Combine results and calculate statistics
        for module_name, info in results:
            if info.functions_run == info.functions_sug:
                stats.modular_sug += 1
            else:
                stats.modular_fatal_error += 1

            stats.errors += info.error

            # Per-module coverage: tested entries / discovered entries
            coverage = (info.coverage[1] / info.coverage[0]) if info.coverage[0] > 0 else 0
            stats.coverage.append(f"{module_name}:{coverage:.2f}\n")

            # Attach the per-module info directly onto the stats object
            stats.__dict__[module_name] = info

        # Mean coverage over all matched modules (0 when none matched)
        total_coverage = (
            sum(float(t.split(":")[-1]) for t in stats.coverage) / len(stats.coverage)
            if stats.coverage else 0
        )

        stats.total_execution_time = time.time() - start_time

        # Generate detailed profiling stats if enabled
        if enable_profiling:
            s = io.StringIO()
            ps = pstats.Stats(profiler, stream=s).sort_stats('cumulative')
            ps.print_stats()
            stats.profiling_data = {
                'detailed_stats': s.getvalue(),
                'total_time': stats.total_execution_time,
                'function_count': stats.modular_run,
                'successful_functions': stats.modular_sug
            }

        print(
            f"\n{stats.modular_run=}"
            f"\n{stats.modular_sug=}"
            f"\n{stats.modular_fatal_error=}"
            f"\n{total_coverage=}"
            f"\nTotal execution time: {stats.total_execution_time:.2f}s"
        )

        if enable_profiling:
            print("\nProfiling Summary:")
            print(f"{'=' * 50}")
            print("Top 10 time-consuming functions:")
            ps.print_stats(10)

        analyzed_data = analyze_data(stats.__dict__)
        return Result.ok(data=stats.__dict__, data_info=analyzed_data)
exit()

proxi attr

Source code in toolboxv2/utils/system/types.py
1420
1421
def exit(self):
    """proxi attr"""
exit_main(*args, **kwargs) staticmethod

proxi attr

Source code in toolboxv2/utils/system/types.py
1284
1285
1286
@staticmethod
def exit_main(*args, **kwargs):
    """proxi attr"""
fuction_runner(function, function_data, args, kwargs, t0=0.0)

parameters = function_data.get('params') modular_name = function_data.get('module_name') function_name = function_data.get('func_name') mod_function_name = f"{modular_name}.{function_name}"

proxi attr

Source code in toolboxv2/utils/system/types.py
1481
1482
1483
1484
1485
1486
1487
1488
1489
def fuction_runner(self, function, function_data: dict, args: list, kwargs: dict, t0=.0):
    """
    parameters = function_data.get('params')
    modular_name = function_data.get('module_name')
    function_name = function_data.get('func_name')
    mod_function_name = f"{modular_name}.{function_name}"

    proxi attr
    """
get_all_mods(working_dir='mods', path_to='./runtime')

proxi attr

Source code in toolboxv2/utils/system/types.py
1391
1392
def get_all_mods(self, working_dir="mods", path_to="./runtime"):
    """proxi attr"""
get_autocompletion_dict()

proxi attr

Source code in toolboxv2/utils/system/types.py
1698
1699
def get_autocompletion_dict(self):
    """proxi attr"""
get_function(name, **kwargs)

Kwargs for _get_function metadata:: return the registered function dictionary stateless: (function_data, None), 0 stateful: (function_data, higher_order_function), 0 state::boolean specification::str default app

Source code in toolboxv2/utils/system/types.py
1432
1433
1434
1435
1436
1437
1438
1439
1440
def get_function(self, name: Enum or tuple, **kwargs):
    """
    Kwargs for _get_function
        metadata:: return the registered function dictionary
            stateless: (function_data, None), 0
            stateful: (function_data, higher_order_function), 0
        state::boolean
            specification::str default app
    """
get_mod(name, spec='app')

proxi attr

Source code in toolboxv2/utils/system/types.py
1520
1521
def get_mod(self, name, spec='app') -> ModuleType or MainToolType:
    """proxi attr"""
get_username(get_input=False, default='loot')

proxi attr

Source code in toolboxv2/utils/system/types.py
1701
1702
def get_username(self, get_input=False, default="loot") -> str:
    """proxi attr"""
hide_console(*args, **kwargs) async staticmethod

proxi attr

Source code in toolboxv2/utils/system/types.py
1288
1289
1290
@staticmethod
async def hide_console(*args, **kwargs):
    """proxi attr"""
inplace_load_instance(mod_name, loc='toolboxv2.mods.', spec='app', save=True)

proxi attr

Source code in toolboxv2/utils/system/types.py
1357
1358
def inplace_load_instance(self, mod_name, loc="toolboxv2.mods.", spec='app', save=True):
    """proxi attr"""
load_all_mods_in_file(working_dir='mods') async

proxi attr

Source code in toolboxv2/utils/system/types.py
1388
1389
async def load_all_mods_in_file(self, working_dir="mods"):
    """proxi attr"""
load_external_mods() async

proxi attr

Source code in toolboxv2/utils/system/types.py
1385
1386
async def load_external_mods(self):
    """proxi attr"""
load_mod(mod_name, mlm='I', **kwargs)

proxi attr

Source code in toolboxv2/utils/system/types.py
1379
1380
def load_mod(self, mod_name: str, mlm='I', **kwargs):
    """proxi attr"""
mod_online(mod_name, installed=False)

proxi attr

Source code in toolboxv2/utils/system/types.py
1366
1367
def mod_online(self, mod_name, installed=False):
    """proxi attr"""
print(text, *args, **kwargs) staticmethod

proxi attr

Source code in toolboxv2/utils/system/types.py
1523
1524
1525
@staticmethod
def print(text, *args, **kwargs):
    """proxi attr"""
print_ok()

proxi attr

Source code in toolboxv2/utils/system/types.py
1404
1405
1406
def print_ok(self):
    """Log a confirmation "OK" message via the instance logger."""
    self.logger.info("OK")
reload_mod(mod_name, spec='app', is_file=True, loc='toolboxv2.mods.')

proxi attr

Source code in toolboxv2/utils/system/types.py
1408
1409
def reload_mod(self, mod_name, spec='app', is_file=True, loc="toolboxv2.mods."):
    """proxi attr"""
remove_mod(mod_name, spec='app', delete=True)

proxi attr

Source code in toolboxv2/utils/system/types.py
1414
1415
def remove_mod(self, mod_name, spec='app', delete=True):
    """proxi attr"""
rrun_flows(name, **kwargs)

proxi attr

Source code in toolboxv2/utils/system/types.py
1317
1318
def rrun_flows(self, name, **kwargs):
    """proxi attr"""
run_a_from_sync(function, *args)

Run an async function from synchronous code.

Source code in toolboxv2/utils/system/types.py
1442
1443
1444
1445
def run_a_from_sync(self, function, *args):
    """
    run a async fuction
    """
run_any(mod_function_name, backwords_compability_variabel_string_holder=None, get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs)

proxi attr

Source code in toolboxv2/utils/system/types.py
1507
1508
1509
1510
1511
def run_any(self, mod_function_name: Enum or str or tuple, backwords_compability_variabel_string_holder=None,
            get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
            kwargs_=None,
            *args, **kwargs):
    """proxi attr"""
run_bg_task(task)

Run an async function as a background task.

Source code in toolboxv2/utils/system/types.py
1457
1458
1459
1460
def run_bg_task(self, task):
    """
            run a async fuction
            """
run_bg_task_advanced(task, *args, **kwargs)

proxi attr

Source code in toolboxv2/utils/system/types.py
1447
1448
1449
1450
def run_bg_task_advanced(self, task, *args, **kwargs):
    """
    proxi attr
    """
run_flows(name, **kwargs)

proxi attr

Source code in toolboxv2/utils/system/types.py
1314
1315
def run_flows(self, name, **kwargs):
    """proxi attr"""
run_function(mod_function_name, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs)

proxi attr

Source code in toolboxv2/utils/system/types.py
1461
1462
1463
1464
1465
1466
1467
1468
1469
def run_function(self, mod_function_name: Enum or tuple,
                 tb_run_function_with_state=True,
                 tb_run_with_specification='app',
                 args_=None,
                 kwargs_=None,
                 *args,
                 **kwargs) -> Result:

    """proxi attr"""
run_http(mod_function_name, function_name=None, method='GET', args_=None, kwargs_=None, *args, **kwargs) async

run a function remote via http / https

Source code in toolboxv2/utils/system/types.py
1501
1502
1503
1504
1505
async def run_http(self, mod_function_name: Enum or str or tuple, function_name=None, method="GET",
                   args_=None,
                   kwargs_=None,
                   *args, **kwargs):
    """run a function remote via http / https"""
save_autocompletion_dict()

proxi attr

Source code in toolboxv2/utils/system/types.py
1695
1696
def save_autocompletion_dict(self):
    """proxi attr"""
save_exit()

proxi attr

Source code in toolboxv2/utils/system/types.py
1376
1377
def save_exit(self):
    """proxi attr"""
save_initialized_module(tools_class, spec)

proxi attr

Source code in toolboxv2/utils/system/types.py
1363
1364
def save_initialized_module(self, tools_class, spec):
    """proxi attr"""
save_instance(instance, modular_id, spec='app', instance_type='file/application', tools_class=None)

proxi attr

Source code in toolboxv2/utils/system/types.py
1360
1361
def save_instance(self, instance, modular_id, spec='app', instance_type="file/application", tools_class=None):
    """proxi attr"""
save_load(modname, spec='app')

proxi attr

Source code in toolboxv2/utils/system/types.py
1429
1430
def save_load(self, modname, spec='app'):
    """proxi attr"""
save_registry_as_enums(directory, filename)

proxi attr

Source code in toolboxv2/utils/system/types.py
1704
1705
def save_registry_as_enums(self, directory: str, filename: str):
    """proxi attr"""
set_flows(r)

proxi attr

Source code in toolboxv2/utils/system/types.py
1311
1312
def set_flows(self, r):
    """proxi attr"""
set_logger(debug=False)

proxi attr

Source code in toolboxv2/utils/system/types.py
1300
1301
def set_logger(self, debug=False):
    """proxi attr"""
show_console(*args, **kwargs) async staticmethod

proxi attr

Source code in toolboxv2/utils/system/types.py
1292
1293
1294
@staticmethod
async def show_console(*args, **kwargs):
    """proxi attr"""
sprint(text, *args, **kwargs) staticmethod

proxi attr

Source code in toolboxv2/utils/system/types.py
1527
1528
1529
@staticmethod
def sprint(text, *args, **kwargs):
    """proxi attr"""
tb(name=None, mod_name='', helper='', version=None, test=True, restrict_in_virtual_mode=False, api=False, initial=False, exit_f=False, test_only=False, memory_cache=False, file_cache=False, row=False, request_as_kwarg=False, state=None, level=0, memory_cache_max_size=100, memory_cache_ttl=300, samples=None, interface=None, pre_compute=None, post_compute=None, api_methods=None, websocket_handler=None)

A decorator for registering and configuring functions within a module.

This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

Parameters:

Name Type Description Default
name str

The name to register the function under. Defaults to the function's own name.

None
mod_name str

The name of the module the function belongs to.

''
helper str

A helper string providing additional information about the function.

''
version str or None

The version of the function or module.

None
test bool

Flag to indicate if the function is for testing purposes.

True
restrict_in_virtual_mode bool

Flag to restrict the function in virtual mode.

False
api bool

Flag to indicate if the function is part of an API.

False
initial bool

Flag to indicate if the function should be executed at initialization.

False
exit_f bool

Flag to indicate if the function should be executed at exit.

False
test_only bool

Flag to indicate if the function should only be used for testing.

False
memory_cache bool

Flag to enable memory caching for the function.

False
request_as_kwarg bool

Flag to pass the request object as a keyword argument when the function is called from the API.

False
file_cache bool

Flag to enable file caching for the function.

False
row bool

rather to auto wrap the result in Result type default False means no row data aka result type

False
state bool or None

Flag to indicate if the function maintains state.

None
level int

The level of the function, used for prioritization or categorization.

0
memory_cache_max_size int

Maximum size of the memory cache.

100
memory_cache_ttl int

Time-to-live for the memory cache entries.

300
samples list or dict or None

Samples or examples of function usage.

None
interface str

The interface type for the function.

None
pre_compute callable

A function to be called before the main function.

None
post_compute callable

A function to be called after the main function.

None
api_methods list[str]

default ["AUTO"] (GET if not params, POST if params) , GET, POST, PUT or DELETE.

None

Returns:

Name Type Description
function

The decorated function with additional processing and registration capabilities.

Source code in toolboxv2/utils/system/types.py
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
def tb(self, name=None,
       mod_name: str = "",
       helper: str = "",
       version: str or None = None,
       test: bool = True,
       restrict_in_virtual_mode: bool = False,
       api: bool = False,
       initial: bool = False,
       exit_f: bool = False,
       test_only: bool = False,
       memory_cache: bool = False,
       file_cache: bool = False,
       row=False,
       request_as_kwarg: bool = False,
       state: bool or None = None,
       level: int = 0,
       memory_cache_max_size: int = 100,
       memory_cache_ttl: int = 300,
       samples: list or dict or None = None,
       interface: ToolBoxInterfaces or None or str = None,
       pre_compute=None,
       post_compute=None,
       api_methods=None,
       websocket_handler: str | None = None,
       ):
    """A decorator for registering and configuring functions within a module.

    This decorator wraps functions with additional functionality such as
    caching, API conversion, and lifecycle management (initialization and
    exit), and registers the function in the module's function registry.

    Args:
        name (str, optional): The name to register the function under. Defaults to the function's own name.
        mod_name (str, optional): The name of the module the function belongs to.
        helper (str, optional): A helper string providing additional information about the function.
        version (str or None, optional): The version of the function or module.
        test (bool, optional): Flag to indicate if the function is for testing purposes.
        restrict_in_virtual_mode (bool, optional): Flag to restrict the function in virtual mode.
        api (bool, optional): Flag to indicate if the function is part of an API.
        initial (bool, optional): Flag to indicate if the function should be executed at initialization.
        exit_f (bool, optional): Flag to indicate if the function should be executed at exit.
        test_only (bool, optional): Flag to indicate if the function should only be used for testing.
        memory_cache (bool, optional): Flag to enable memory caching for the function.
        request_as_kwarg (bool, optional): Flag to pass the request as a keyword argument when called from the API.
        file_cache (bool, optional): Flag to enable file caching for the function.
        row (bool, optional): Whether to return raw data; False (default) wraps the result in a Result type.
        state (bool or None, optional): Flag to indicate if the function maintains state.
        level (int, optional): The level of the function, used for prioritization or categorization.
        memory_cache_max_size (int, optional): Maximum size of the memory cache.
        memory_cache_ttl (int, optional): Time-to-live for the memory cache entries.
        samples (list or dict or None, optional): Samples or examples of function usage.
        interface (str, optional): The interface type for the function; defaults to "tb" when None.
        pre_compute (callable, optional): A function to be called before the main function.
        post_compute (callable, optional): A function to be called after the main function.
        api_methods (list[str], optional): default ["AUTO"] (GET if not params, POST if params), GET, POST, PUT or DELETE.
        websocket_handler (str or None, optional): Name of a websocket handler associated with the function.

    Note:
        NOTE(review): ``api_methods`` and ``websocket_handler`` are accepted
        here but are NOT forwarded to ``_create_decorator`` below — confirm
        whether they are consumed elsewhere or silently dropped.

    Returns:
        function: The decorated function with additional processing and registration capabilities.
    """
    if interface is None:
        interface = "tb"
    # Test-only functions are disabled outside test app instances; the
    # replacement lambda leaves the decorated function unregistered.
    if test_only and 'test' not in self.id:
        return lambda *args, **kwargs: args
    return self._create_decorator(interface,
                                  name,
                                  mod_name,
                                  level=level,
                                  restrict_in_virtual_mode=restrict_in_virtual_mode,
                                  helper=helper,
                                  api=api,
                                  version=version,
                                  initial=initial,
                                  exit_f=exit_f,
                                  test=test,
                                  samples=samples,
                                  state=state,
                                  pre_compute=pre_compute,
                                  post_compute=post_compute,
                                  memory_cache=memory_cache,
                                  file_cache=file_cache,
                                  row=row,
                                  request_as_kwarg=request_as_kwarg,
                                  memory_cache_max_size=memory_cache_max_size,
                                  memory_cache_ttl=memory_cache_ttl)
wait_for_bg_tasks(timeout=None)

proxi attr

Source code in toolboxv2/utils/system/types.py
1452
1453
1454
1455
def wait_for_bg_tasks(self, timeout=None):
    """
    proxi attr
    """
watch_mod(mod_name, spec='app', loc='toolboxv2.mods.', use_thread=True, path_name=None)

proxi attr

Source code in toolboxv2/utils/system/types.py
1411
1412
def watch_mod(self, mod_name, spec='app', loc="toolboxv2.mods.", use_thread=True, path_name=None):
    """proxi attr"""
web_context()

returns the build index ( toolbox web component )

Source code in toolboxv2/utils/system/types.py
1423
1424
def web_context(self) -> str:
    """returns the build index ( toolbox web component )"""
MainTool
Source code in toolboxv2/utils/system/main_tool.py
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
class MainTool:
    # Class-level defaults; instances and subclasses usually override these.
    toolID: str = ""
    # app = None
    interface = None  # set once from app.interface_type on first __ainit__
    spec = "app"
    name = ""
    color = "Bold"
    stuf = False  # when truthy, print() is silenced

    def __init__(self, *args, **kwargs):
        """
        Standard constructor used for argument passing.

        Do not override. Use __ainit__ instead: the args/kwargs given here are
        stored and replayed into __ainit__ when the instance is awaited.
        """
        # Saved so __initobj can forward them to __ainit__ on `await instance`.
        self.__storedargs = args, kwargs
        self.tools = kwargs.get("tool", {})
        self.logger = kwargs.get("logs", get_logger())
        self.color = kwargs.get("color", "WHITE")
        # "load"/"on_start" hook; sync hooks run here, async hooks in __ainit__.
        self.todo = kwargs.get("load", kwargs.get("on_start", lambda: None))
        if "on_exit" in kwargs and isinstance(kwargs.get("on_exit"), Callable):
            # Register the exit hook as a toolbox function via the app decorator.
            self.on_exit =self.app.tb(
                mod_name=self.name,
                name=kwargs.get("on_exit").__name__,
                version=self.version if hasattr(self, 'version') else "0.0.0",
            )(kwargs.get("on_exit"))
        self.async_initialized = False
        if self.todo:
            try:
                if inspect.iscoroutinefunction(self.todo):
                    # Deferred: coroutine hooks are awaited later in __ainit__.
                    pass
                else:
                    self.todo()
                get_logger().info(f"{self.name} on load suspended")
            except Exception as e:
                get_logger().error(f"Error loading mod {self.name} {e}")
                if self.app.debug:
                    import traceback
                    traceback.print_exc()
        else:
            get_logger().info(f"{self.name} no load require")

    async def __ainit__(self, *args, **kwargs):
        """Async initializer; receives the arguments originally given to __init__."""
        self.version = kwargs["v"]
        self.tools = kwargs.get("tool", {})
        self.name = kwargs["name"]
        self.logger = kwargs.get("logs", get_logger())
        self.color = kwargs.get("color", "WHITE")
        self.todo = kwargs.get("load", kwargs.get("on_start"))
        if not hasattr(self, 'config'):
            self.config = {}
        self.user = None
        self.description = "A toolbox mod" if kwargs.get("description") is None else kwargs.get("description")
        if MainTool.interface is None:
            MainTool.interface = self.app.interface_type
        # Result.default(self.app.interface)

        if self.todo:
            try:
                if inspect.iscoroutinefunction(self.todo):
                    await self.todo()
                else:
                    # Sync hooks already ran in __init__.
                    pass
                await asyncio.sleep(0.1)
                get_logger().info(f"{self.name} on load suspended")
            except Exception as e:
                get_logger().error(f"Error loading mod {self.name} {e}")
                if self.app.debug:
                    import traceback
                    traceback.print_exc()
        else:
            get_logger().info(f"{self.name} no load require")
        self.app.print(f"TOOL : {self.spec}.{self.name} online")



    @property
    def app(self):
        # Resolved lazily so the tool always sees the current App singleton.
        return get_app(
            from_=f"{self.spec}.{self.name}|{self.toolID if self.toolID else '*' + MainTool.toolID} {self.interface if self.interface else MainTool.interface}")

    @app.setter
    def app(self, v):
        # The App is a singleton resolved via get_app(); assignment is forbidden.
        raise PermissionError(f"You cannot set the App Instance! {v=}")

    @staticmethod
    def return_result(error: ToolBoxError = ToolBoxError.none,
                      exec_code: int = 0,
                      help_text: str = "",
                      data_info=None,
                      data=None,
                      data_to=None):
        """Build a Result; data_to defaults to the active interface (or cli)."""

        if data_to is None:
            data_to = MainTool.interface if MainTool.interface is not None else ToolBoxInterfaces.cli

        if data is None:
            data = {}

        if data_info is None:
            data_info = {}

        return Result(
            error,
            ToolBoxResult(data_info=data_info, data=data, data_to=data_to),
            ToolBoxInfo(exec_code=exec_code, help_text=help_text)
        )

    def print(self, message, end="\n", **kwargs):
        """Print with the tool's color and name prefix; silenced when self.stuf is set."""
        if self.stuf:
            return

        self.app.print(Style.style_dic[self.color] + self.name + Style.style_dic["END"] + ":", message, end=end,
                       **kwargs)

    def add_str_to_config(self, command):
        """Store a [key, value] pair in self.config; returns False on bad input."""
        if len(command) != 2:
            self.logger.error('Invalid command must be key value')
            return False
        self.config[command[0]] = command[1]

    def webInstall(self, user_instance, construct_render) -> str:
        """Returns a web installer for the given user instance and construct render template."""

    def get_version(self) -> str:
        """Returns the version string (set in __ainit__)."""
        return self.version

    async def get_user(self, username: str) -> Result:
        """Look up a user by name via the CloudM auth manager."""
        return await self.app.a_run_any(CLOUDM_AUTHMANAGER.GET_USER_BY_NAME, username=username, get_results=True)

    async def __initobj(self):
        """Crutch used for __await__ after spawning: runs __ainit__ exactly once."""
        assert not self.async_initialized
        self.async_initialized = True
        # pass the parameters to __ainit__ that were passed to __init__
        await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
        return self

    def __await__(self):
        # Allows `tool = await MainTool(...)` to complete async initialization.
        return self.__initobj().__await__()
__init__(*args, **kwargs)

Standard constructor used for argument passing. Do not override; use `__ainit__` instead.

Source code in toolboxv2/utils/system/main_tool.py
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
def __init__(self, *args, **kwargs):
    """
    Standard constructor used for argument passing.

    Do not override. Use __ainit__ instead: the args/kwargs given here are
    stored and replayed into __ainit__ when the instance is awaited.
    """
    # Saved so __initobj can forward them to __ainit__ on `await instance`.
    self.__storedargs = args, kwargs
    self.tools = kwargs.get("tool", {})
    self.logger = kwargs.get("logs", get_logger())
    self.color = kwargs.get("color", "WHITE")
    # "load"/"on_start" hook; sync hooks run here, async hooks in __ainit__.
    self.todo = kwargs.get("load", kwargs.get("on_start", lambda: None))
    if "on_exit" in kwargs and isinstance(kwargs.get("on_exit"), Callable):
        # Register the exit hook as a toolbox function via the app decorator.
        self.on_exit =self.app.tb(
            mod_name=self.name,
            name=kwargs.get("on_exit").__name__,
            version=self.version if hasattr(self, 'version') else "0.0.0",
        )(kwargs.get("on_exit"))
    self.async_initialized = False
    if self.todo:
        try:
            if inspect.iscoroutinefunction(self.todo):
                # Deferred: coroutine hooks are awaited later in __ainit__.
                pass
            else:
                self.todo()
            get_logger().info(f"{self.name} on load suspended")
        except Exception as e:
            get_logger().error(f"Error loading mod {self.name} {e}")
            if self.app.debug:
                import traceback
                traceback.print_exc()
    else:
        get_logger().info(f"{self.name} no load require")
__initobj() async

Crutch used for `__await__` after spawning.

Source code in toolboxv2/utils/system/main_tool.py
174
175
176
177
178
179
180
async def __initobj(self):
    """Crutch used for __await__ after spawning: runs __ainit__ exactly once."""
    assert not self.async_initialized
    self.async_initialized = True
    # pass the parameters to __ainit__ that were passed to __init__
    await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
    return self
get_version()

Returns the version.

Source code in toolboxv2/utils/system/main_tool.py
167
168
169
def get_version(self) -> str:
    """Returns the version string (set in __ainit__)."""
    return self.version
webInstall(user_instance, construct_render)

Returns a web installer for the given user instance and construct render template.

Source code in toolboxv2/utils/system/main_tool.py
164
165
def webInstall(self, user_instance, construct_render) -> str:
    """Returns a web installer for the given user instance and construct render template."""
MainToolType
Source code in toolboxv2/utils/system/types.py
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
class MainToolType:
    """Typing/interface stub mirroring MainTool; implementations are proxied."""
    toolID: str
    app: A
    interface: ToolBoxInterfaces
    spec: str

    version: str
    tools: dict  # legacy
    name: str
    logger: logging
    color: str
    todo: Callable
    _on_exit: Callable
    stuf: bool
    config: dict
    user: U | None
    description: str

    @staticmethod
    def return_result(error: ToolBoxError = ToolBoxError.none,
                      exec_code: int = 0,
                      help_text: str = "",
                      data_info=None,
                      data=None,
                      data_to=None) -> Result:
        """Proxy attr: build a Result (see MainTool.return_result)."""

    def load(self):
        """Proxy attr: run the mod's load hook."""

    def print(self, message, end="\n", **kwargs):
        """Proxy attr: colored, name-prefixed print."""

    def add_str_to_config(self, command):
        """Store a [key, value] pair in self.config; returns False on bad input."""
        if len(command) != 2:
            self.logger.error('Invalid command must be key value')
            return False
        self.config[command[0]] = command[1]

    def webInstall(self, user_instance, construct_render) -> str:
        """Returns a web installer for the given user instance and construct render template."""

    async def get_user(self, username: str) -> Result:
        """Look up a user by name via the CloudM auth manager."""
        # fix: a_run_any is a coroutine function; without `await` callers received
        # a coroutine object instead of the declared Result (MainTool.get_user awaits).
        return await self.app.a_run_any(CLOUDM_AUTHMANAGER.GET_USER_BY_NAME, username=username, get_results=True)
load()

Proxy attribute — the concrete implementation lives on MainTool.

Source code in toolboxv2/utils/system/types.py
1183
1184
def load(self):
    """Proxy attr: run the mod's load hook (implemented on MainTool)."""
print(message, end='\n', **kwargs)

Proxy attribute — the concrete implementation lives on MainTool.

Source code in toolboxv2/utils/system/types.py
1186
1187
def print(self, message, end="\n", **kwargs):
    """Proxy attr: colored, name-prefixed print (implemented on MainTool)."""
return_result(error=ToolBoxError.none, exec_code=0, help_text='', data_info=None, data=None, data_to=None) staticmethod

Proxy attribute — the concrete implementation lives on MainTool.

Source code in toolboxv2/utils/system/types.py
1174
1175
1176
1177
1178
1179
1180
1181
@staticmethod
def return_result(error: ToolBoxError = ToolBoxError.none,
                  exec_code: int = 0,
                  help_text: str = "",
                  data_info=None,
                  data=None,
                  data_to=None) -> Result:
    """Proxy attr: build a Result (implemented on MainTool)."""
webInstall(user_instance, construct_render)

Returns a web installer for the given user instance and construct render template.

Source code in toolboxv2/utils/system/types.py
1195
1196
def webInstall(self, user_instance, construct_render) -> str:
    """Returns a web installer for the given user instance and construct render template."""
Result
Source code in toolboxv2/utils/system/types.py
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
class Result:
    """
    Unified return value for toolbox functions.

    Bundles an error state (ToolBoxError), a payload (ToolBoxResult) and
    execution info (ToolBoxInfo), plus an optional origin and an optional
    attached background task.
    """

    _task = None  # optional background task attached via .task()

    def __init__(self,
                 error: ToolBoxError,
                 result: ToolBoxResult,
                 info: ToolBoxInfo,
                 origin: Any | None = None,
                 ):
        self.error: ToolBoxError = error
        self.result: ToolBoxResult = result
        self.info: ToolBoxInfo = info
        self.origin = origin

    def as_result(self):
        """Identity accessor so Result and ApiResult share a common protocol."""
        return self

    def as_dict(self):
        """Serialize into a plain dict; Enum members collapse to their values."""
        return {
            "error": self.error.value if isinstance(self.error, Enum) else self.error,
            "result": {
                "data_to": self.result.data_to.value if isinstance(self.result.data_to, Enum) else self.result.data_to,
                "data_info": self.result.data_info,
                "data": self.result.data,
                "data_type": self.result.data_type
            } if self.result else None,
            "info": {
                "exec_code": self.info.exec_code,  # exec_code maps to HTTP response codes
                "help_text": self.info.help_text
            } if self.info else None,
            "origin": self.origin
        }

    def set_origin(self, origin):
        """Set the origin exactly once; raises ValueError if already set."""
        if self.origin is not None:
            raise ValueError("You cannot Change the origin of a Result!")
        self.origin = origin
        return self

    def set_dir_origin(self, name, extras="assets/"):
        """Set the origin to a mod asset directory; raises ValueError if already set."""
        if self.origin is not None:
            raise ValueError("You cannot Change the origin of a Result!")
        self.origin = f"mods/{name}/{extras}"
        return self

    def is_error(self):
        """True when this (or a nested) Result represents a failure."""
        if _test_is_result(self.result.data):
            return self.result.data.is_error()
        if self.error == ToolBoxError.none:
            return False
        if self.info.exec_code == 0:
            return False
        return self.info.exec_code != 200

    def is_ok(self):
        """Inverse of is_error()."""
        return not self.is_error()

    def is_data(self):
        """True when a payload is present."""
        return self.result.data is not None

    def to_api_result(self):
        """Convert to the ApiResult mirror model (enums collapsed to values)."""
        return ApiResult(
            error=self.error.value if isinstance(self.error, Enum) else self.error,
            result=ToolBoxResultBM(
                data_to=self.result.data_to.value if isinstance(self.result.data_to, Enum) else self.result.data_to,
                data_info=self.result.data_info,
                data=self.result.data,
                data_type=self.result.data_type
            ) if self.result else None,
            info=ToolBoxInfoBM(
                exec_code=self.info.exec_code,  # exec_code maps to HTTP response codes
                help_text=self.info.help_text
            ) if self.info else None,
            origin=self.origin
        )

    def task(self, task):
        """Attach a background task (retrievable via the bg_task property)."""
        self._task = task
        return self

    @staticmethod
    def result_from_dict(error: str, result: dict, info: dict, origin: list | str | None):
        """Rebuild a Result from its dict form, using '404' placeholders for gaps."""
        return ApiResult(
            # fix: was `error if isinstance(error, Enum) else error` (a no-op ternary);
            # collapse Enum members to their value as to_api_result() does
            error=error.value if isinstance(error, Enum) else error,
            result=ToolBoxResultBM(
                # fix: same no-op ternary; extract .value from Enum members
                data_to=result.get('data_to').value if isinstance(result.get('data_to'), Enum) else result.get('data_to'),
                data_info=result.get('data_info', '404'),
                data=result.get('data'),
                data_type=result.get('data_type', '404'),
            ) if result else ToolBoxResultBM(
                data_to=ToolBoxInterfaces.cli.value,
                data_info='',
                data='404',
                data_type='404',
            ),
            info=ToolBoxInfoBM(
                exec_code=info.get('exec_code', 404),
                help_text=info.get('help_text', '404')
            ) if info else ToolBoxInfoBM(
                exec_code=404,
                help_text='404'
            ),
            origin=origin
        ).as_result()

    @classmethod
    def stream(cls,
               stream_generator: Any,  # Renamed from source for clarity
               content_type: str = "text/event-stream",  # Default to SSE
               headers: dict | None = None,
               info: str = "OK",
               interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
               cleanup_func: Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None = None):
        """
        Create a streaming response Result. Handles SSE and other stream types.

        Args:
            stream_generator: Any stream source (async generator, sync generator, iterable, or single item).
            content_type: Content-Type header (default: text/event-stream for SSE).
            headers: Additional HTTP headers for the response.
            info: Help text for the result.
            interface: Interface to send data to.
            cleanup_func: Optional function for cleanup.

        Returns:
            A Result object configured for streaming.
        """
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=0, help_text=info)

        final_generator: AsyncGenerator[str, None]

        if content_type == "text/event-stream":
            # For SSE, always use SSEGenerator.create_sse_stream to wrap the source.
            # SSEGenerator.create_sse_stream handles various types of stream_generator internally.
            final_generator = SSEGenerator.create_sse_stream(source=stream_generator, cleanup_func=cleanup_func)

            # Standard SSE headers for the HTTP response itself
            # These will be stored in the Result object. Rust side decides how to use them.
            standard_sse_headers = {
                "Cache-Control": "no-cache",  # SSE specific
                "Connection": "keep-alive",  # SSE specific
                "X-Accel-Buffering": "no",  # Useful for proxies with SSE
                # Content-Type is implicitly text/event-stream, will be in streaming_data below
            }
            all_response_headers = standard_sse_headers.copy()
            if headers:
                all_response_headers.update(headers)
        else:
            # For non-SSE streams.
            # If stream_generator is sync, wrap it to be async.
            # If already async or single item, it will be handled.
            # Rust's stream_generator in ToolboxClient seems to handle both sync/async Python generators.
            # For consistency with how SSEGenerator does it, we can wrap sync ones.
            if inspect.isgenerator(stream_generator) or \
                (not isinstance(stream_generator, str) and hasattr(stream_generator, '__iter__')):
                final_generator = SSEGenerator.wrap_sync_generator(stream_generator)  # Simple async wrapper
            elif inspect.isasyncgen(stream_generator):
                final_generator = stream_generator
            else:  # Single item or string
                async def _single_item_gen():
                    yield stream_generator

                final_generator = _single_item_gen()
            all_response_headers = headers if headers else {}

        # Prepare streaming data to be stored in the Result object
        streaming_data = {
            "type": "stream",  # Indicator for Rust side
            "generator": final_generator,
            "content_type": content_type,  # Let Rust know the intended content type
            "headers": all_response_headers  # Intended HTTP headers for the overall response
        }

        result_payload = ToolBoxResult(
            data_to=interface,
            data=streaming_data,
            data_info="Streaming response" if content_type != "text/event-stream" else "SSE Event Stream",
            data_type="stream"  # Generic type for Rust to identify it needs to stream from 'generator'
        )

        return cls(error=error, info=info_obj, result=result_payload)

    @classmethod
    def sse(cls,
            stream_generator: Any,
            info: str = "OK",
            interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
            cleanup_func: Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None = None,
            # http_headers: Optional[dict] = None # If we want to allow overriding default SSE HTTP headers
            ):
        """
        Create an Server-Sent Events (SSE) streaming response Result.

        Args:
            stream_generator: A source yielding individual data items. This can be an
                              async generator, sync generator, iterable, or a single item.
                              Each item will be formatted as an SSE event.
            info: Optional help text for the Result.
            interface: Optional ToolBoxInterface to target.
            cleanup_func: Optional cleanup function to run when the stream ends or is cancelled.
            #http_headers: Optional dictionary of custom HTTP headers for the SSE response.

        Returns:
            A Result object configured for SSE streaming.
        """
        # Result.stream will handle calling SSEGenerator.create_sse_stream
        # and setting appropriate default headers for SSE when content_type is "text/event-stream".
        return cls.stream(
            stream_generator=stream_generator,
            content_type="text/event-stream",
            # headers=http_headers, # Pass if we add http_headers param
            info=info,
            interface=interface,
            cleanup_func=cleanup_func
        )

    @classmethod
    def default(cls, interface=ToolBoxInterfaces.native):
        """Create an empty placeholder Result (exec_code -1, no data)."""
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=-1, help_text="")
        result = ToolBoxResult(data_to=interface)
        return cls(error=error, info=info, result=result)

    @classmethod
    def json(cls, data, info="OK", interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None):
        """Create a JSON response Result."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=status_code or exec_code, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=data,
            data_info="JSON response",
            data_type="json"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def text(cls, text_data, content_type="text/plain",exec_code=None,status=200, info="OK", interface=ToolBoxInterfaces.remote, headers=None):
        """Create a text response Result with specific content type."""
        # Custom headers are only supported by the html/special_html path.
        if headers is not None:
            return cls.html(text_data, status= exec_code or status, info=info, headers=headers)
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=exec_code or status, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=text_data,
            data_info="Text response",
            data_type=content_type
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def binary(cls, data, content_type="application/octet-stream", download_name=None, info="OK",
               interface=ToolBoxInterfaces.remote):
        """Create a binary data response Result."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=0, help_text=info)

        # Create a dictionary with binary data and metadata
        binary_data = {
            "data": data,
            "content_type": content_type,
            "filename": download_name
        }

        result = ToolBoxResult(
            data_to=interface,
            data=binary_data,
            data_info=f"Binary response: {download_name}" if download_name else "Binary response",
            data_type="binary"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def file(cls, data, filename, content_type=None, info="OK", interface=ToolBoxInterfaces.remote):
        """Create a file download response Result.

        Args:
            data: File data as bytes or base64 string
            filename: Name of the file for download
            content_type: MIME type of the file (auto-detected if None)
            info: Response info text
            interface: Target interface

        Returns:
            Result object configured for file download
        """
        import base64
        import mimetypes

        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=200, help_text=info)

        # Auto-detect content type if not provided
        if content_type is None:
            content_type, _ = mimetypes.guess_type(filename)
            if content_type is None:
                content_type = "application/octet-stream"

        # Ensure data is base64 encoded string (as expected by Rust server)
        if isinstance(data, bytes):
            base64_data = base64.b64encode(data).decode('utf-8')
        elif isinstance(data, str):
            # Assume it's already base64 encoded
            base64_data = data
        else:
            raise ValueError("File data must be bytes or base64 string")

        result = ToolBoxResult(
            data_to=interface,
            data=base64_data,  # Rust expects base64 string for "file" type
            data_info=f"File download: {filename}",  # fix: f-string placeholder was lost
            data_type="file"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def redirect(cls, url, status_code=302, info="Redirect", interface=ToolBoxInterfaces.remote):
        """Create a redirect response."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=status_code, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=url,
            data_info="Redirect response",
            data_type="redirect"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def ok(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.native):
        """Create a success Result carrying arbitrary data."""
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=0, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def html(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.remote, data_type="html",status=200, headers=None, row=False):
        """Create an HTML response; wraps non-raw content in the standard page shell."""
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=status, help_text=info)
        from ...utils.system.getting_and_closing_app import get_app

        # fix: the marker string contained stray quotes ('"<div class="main-content""'),
        # so already-wrapped content was wrapped again; also the wrapper was closed
        # with '<div>' instead of '</div>'.
        if not row and '<div class="main-content"' not in data:
            data = f'<div class="main-content frosted-glass">{data}</div>'
        if not row and get_app().web_context() not in data:
            data = get_app().web_context() + data

        if isinstance(headers, dict):
            result = ToolBoxResult(data_to=interface, data={'html':data,'headers':headers}, data_info=data_info,
                                   data_type="special_html")
        else:
            result = ToolBoxResult(data_to=interface, data=data, data_info=data_info,
                                   data_type=data_type if data_type is not None else type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def future(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.future):
        """Create a Result whose payload is a future/awaitable (resolved via aget)."""
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=0, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type="future")
        return cls(error=error, info=info, result=result)

    @classmethod
    def custom_error(cls, data=None, data_info="", info="", exec_code=-1, interface=ToolBoxInterfaces.native):
        """Create a custom_error Result with a caller-chosen exec_code."""
        error = ToolBoxError.custom_error
        info = ToolBoxInfo(exec_code=exec_code, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def error(cls, data=None, data_info="", info="", exec_code=450, interface=ToolBoxInterfaces.remote):
        """Create a remote-facing error Result (default exec_code 450)."""
        error = ToolBoxError.custom_error
        info = ToolBoxInfo(exec_code=exec_code, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def default_user_error(cls, info="", exec_code=-3, interface=ToolBoxInterfaces.native, data=None):
        """Create an input_error Result (caller supplied bad input)."""
        error = ToolBoxError.input_error
        info = ToolBoxInfo(exec_code, info)
        result = ToolBoxResult(data_to=interface, data=data, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def default_internal_error(cls, info="", exec_code=-2, interface=ToolBoxInterfaces.native, data=None):
        """Create an internal_error Result (the tool itself failed)."""
        error = ToolBoxError.internal_error
        info = ToolBoxInfo(exec_code, info)
        result = ToolBoxResult(data_to=interface, data=data, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    def print(self, show=True, show_data=True, prifix=""):
        """Pretty-print the Result (or return the text when show=False)."""
        data = '\n' + f"{((prifix + f'Data_{self.result.data_type}: ' + str(self.result.data) if self.result.data is not None else 'NO Data') if not isinstance(self.result.data, Result) else self.result.data.print(show=False, show_data=show_data, prifix=prifix + '-')) if show_data else 'Data: private'}"
        origin = '\n' + f"{prifix + 'Origin: ' + str(self.origin) if self.origin is not None else 'NO Origin'}"
        text = (f"Function Exec code: {self.info.exec_code}"
                f"\n{prifix}Info's:"
                f" {self.info.help_text} {'<|> ' + str(self.result.data_info) if self.result.data_info is not None else ''}"
                f"{origin}{(data[:100]+'...') if not data.endswith('NO Data') else ''}\n")
        if not show:
            return text
        print("\n======== Result ========\n" + text + "------- EndOfD -------")
        return self

    def log(self, show_data=True, prifix=""):
        """Write the printable form to the debug log."""
        from toolboxv2 import get_logger
        get_logger().debug(self.print(show=False, show_data=show_data, prifix=prifix).replace("\n", " - "))
        return self

    def __str__(self):
        return self.print(show=False, show_data=True)

    def get(self, key=None, default=None):
        """Extract the payload; unwraps nested Results and dict keys."""
        data = self.result.data
        if isinstance(data, Result):
            return data.get(key=key, default=default)
        if key is not None and isinstance(data, dict):
            return data.get(key, default)
        return data if data is not None else default

    async def aget(self, key=None, default=None):
        """Like get(), but awaits future/coroutine payloads first."""
        if asyncio.isfuture(self.result.data) or asyncio.iscoroutine(self.result.data) or (
            isinstance(self.result.data_to, Enum) and self.result.data_to.name == ToolBoxInterfaces.future.name):
            data = await self.result.data
        else:
            data = self.get(key=None, default=None)
        if isinstance(data, Result):
            return data.get(key=key, default=default)
        if key is not None and isinstance(data, dict):
            return data.get(key, default)
        return data if data is not None else default

    def lazy_return(self, _=0, data=None, **kwargs):
        """On success return data (or self); otherwise dispatch per flag:
        'raise', 'logg', 'user', 'intern' (default: custom_error)."""
        flags = ['raise', 'logg', 'user', 'intern']
        flag = flags[_] if isinstance(_, int) else _
        if self.info.exec_code == 0:
            return self if data is None else data if _test_is_result(data) else self.ok(data=data, **kwargs)
        if flag == 'raise':
            raise ValueError(self.print(show=False))
        if flag == 'logg':
            from .. import get_logger
            get_logger().error(self.print(show=False))

        if flag == 'user':
            return self if data is None else data if _test_is_result(data) else self.default_user_error(data=data,
                                                                                                        **kwargs)
        if flag == 'intern':
            return self if data is None else data if _test_is_result(data) else self.default_internal_error(data=data,
                                                                                                            **kwargs)

        return self if data is None else data if _test_is_result(data) else self.custom_error(data=data, **kwargs)

    @property
    def bg_task(self):
        """The background task attached via task(), or None."""
        return self._task
binary(data, content_type='application/octet-stream', download_name=None, info='OK', interface=ToolBoxInterfaces.remote) classmethod

Create a binary data response Result.

Source code in toolboxv2/utils/system/types.py
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
@classmethod
def binary(cls, data, content_type="application/octet-stream", download_name=None, info="OK",
           interface=ToolBoxInterfaces.remote):
    """Create a binary data response Result.

    The raw bytes are wrapped in a dict together with their content type
    and optional download filename so the transport layer can build the
    HTTP response.
    """
    # Payload dict carries the binary blob plus its transfer metadata.
    payload = {
        "data": data,
        "content_type": content_type,
        "filename": download_name,
    }
    description = f"Binary response: {download_name}" if download_name else "Binary response"

    return cls(
        error=ToolBoxError.none,
        info=ToolBoxInfo(exec_code=0, help_text=info),
        result=ToolBoxResult(
            data_to=interface,
            data=payload,
            data_info=description,
            data_type="binary",
        ),
    )
file(data, filename, content_type=None, info='OK', interface=ToolBoxInterfaces.remote) classmethod

Create a file download response Result.

Parameters:

Name Type Description Default
data

File data as bytes or base64 string

required
filename

Name of the file for download

required
content_type

MIME type of the file (auto-detected if None)

None
info

Response info text

'OK'
interface

Target interface

remote

Returns:

Type Description

Result object configured for file download

Source code in toolboxv2/utils/system/types.py
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
@classmethod
def file(cls, data, filename, content_type=None, info="OK", interface=ToolBoxInterfaces.remote):
    """Create a file download response Result.

    Args:
        data: File data as bytes or base64 string
        filename: Name of the file for download
        content_type: MIME type of the file (auto-detected if None)
        info: Response info text
        interface: Target interface

    Returns:
        Result object configured for file download

    Raises:
        ValueError: If *data* is neither bytes nor a (base64) string.
    """
    import base64
    import mimetypes

    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=200, help_text=info)

    # Auto-detect content type if not provided
    if content_type is None:
        content_type, _ = mimetypes.guess_type(filename)
        if content_type is None:
            content_type = "application/octet-stream"

    # Ensure data is base64 encoded string (as expected by Rust server)
    if isinstance(data, bytes):
        base64_data = base64.b64encode(data).decode('utf-8')
    elif isinstance(data, str):
        # Assume it's already base64 encoded
        base64_data = data
    else:
        raise ValueError("File data must be bytes or base64 string")

    result = ToolBoxResult(
        data_to=interface,
        data=base64_data,  # Rust expects base64 string for "file" type
        # Fix: the f-string previously contained no placeholder, so every
        # response carried the same static data_info instead of the filename.
        data_info=f"File download: {filename}",
        data_type="file"
    )

    return cls(error=error, info=info_obj, result=result)
json(data, info='OK', interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None) classmethod

Create a JSON response Result.

Source code in toolboxv2/utils/system/types.py
844
845
846
847
848
849
850
851
852
853
854
855
856
857
@classmethod
def json(cls, data, info="OK", interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None):
    """Create a JSON response Result.

    ``status_code`` takes precedence over ``exec_code`` when it is truthy.
    """
    code = status_code or exec_code
    return cls(
        error=ToolBoxError.none,
        info=ToolBoxInfo(exec_code=code, help_text=info),
        result=ToolBoxResult(
            data_to=interface,
            data=data,
            data_info="JSON response",
            data_type="json",
        ),
    )
redirect(url, status_code=302, info='Redirect', interface=ToolBoxInterfaces.remote) classmethod

Create a redirect response.

Source code in toolboxv2/utils/system/types.py
943
944
945
946
947
948
949
950
951
952
953
954
955
956
@classmethod
def redirect(cls, url, status_code=302, info="Redirect", interface=ToolBoxInterfaces.remote):
    """Create a redirect response Result pointing at *url*."""
    redirect_result = ToolBoxResult(
        data_to=interface,
        data=url,
        data_info="Redirect response",
        data_type="redirect",
    )
    return cls(
        error=ToolBoxError.none,
        info=ToolBoxInfo(exec_code=status_code, help_text=info),
        result=redirect_result,
    )
sse(stream_generator, info='OK', interface=ToolBoxInterfaces.remote, cleanup_func=None) classmethod

Create a Server-Sent Events (SSE) streaming response Result.

Parameters:

Name Type Description Default
stream_generator Any

A source yielding individual data items. This can be an async generator, sync generator, iterable, or a single item. Each item will be formatted as an SSE event.

required
info str

Optional help text for the Result.

'OK'
interface ToolBoxInterfaces

Optional ToolBoxInterface to target.

remote
cleanup_func Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None

Optional cleanup function to run when the stream ends or is cancelled.

None
#http_headers

Optional dictionary of custom HTTP headers for the SSE response.

required

Returns:

Type Description

A Result object configured for SSE streaming.

Source code in toolboxv2/utils/system/types.py
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
@classmethod
def sse(cls,
        stream_generator: Any,
        info: str = "OK",
        interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
        cleanup_func: Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None = None,
        # http_headers: Optional[dict] = None # If we want to allow overriding default SSE HTTP headers
        ):
    """
    Create a Server-Sent Events (SSE) streaming response Result.

    Thin convenience wrapper around :meth:`stream` that fixes the content
    type to ``text/event-stream``; ``stream`` handles wrapping the source
    via SSEGenerator and setting the default SSE headers.

    Args:
        stream_generator: Source yielding individual data items (async
            generator, sync generator, iterable, or a single item); each
            item is formatted as one SSE event.
        info: Optional help text for the Result.
        interface: Optional ToolBoxInterface to target.
        cleanup_func: Optional cleanup function run when the stream ends
            or is cancelled.

    Returns:
        A Result object configured for SSE streaming.
    """
    stream_kwargs = dict(
        stream_generator=stream_generator,
        content_type="text/event-stream",
        # headers=http_headers, # Pass if we add http_headers param
        info=info,
        interface=interface,
        cleanup_func=cleanup_func,
    )
    return cls.stream(**stream_kwargs)
stream(stream_generator, content_type='text/event-stream', headers=None, info='OK', interface=ToolBoxInterfaces.remote, cleanup_func=None) classmethod

Create a streaming response Result. Handles SSE and other stream types.

Parameters:

Name Type Description Default
stream_generator Any

Any stream source (async generator, sync generator, iterable, or single item).

required
content_type str

Content-Type header (default: text/event-stream for SSE).

'text/event-stream'
headers dict | None

Additional HTTP headers for the response.

None
info str

Help text for the result.

'OK'
interface ToolBoxInterfaces

Interface to send data to.

remote
cleanup_func Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None

Optional function for cleanup.

None

Returns:

Type Description

A Result object configured for streaming.

Source code in toolboxv2/utils/system/types.py
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
@classmethod
def stream(cls,
           stream_generator: Any,  # Renamed from source for clarity
           content_type: str = "text/event-stream",  # Default to SSE
           headers: dict | None = None,
           info: str = "OK",
           interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
           cleanup_func: Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None = None):
    """
    Create a streaming response Result. Handles SSE and other stream types.

    Args:
        stream_generator: Any stream source (async generator, sync generator, iterable, or single item).
        content_type: Content-Type header (default: text/event-stream for SSE).
        headers: Additional HTTP headers for the response.
        info: Help text for the result.
        interface: Interface to send data to.
        cleanup_func: Optional function for cleanup.

    Returns:
        A Result object configured for streaming.
    """
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=0, help_text=info)

    final_generator: AsyncGenerator[str, None]

    if content_type == "text/event-stream":
        # For SSE, always use SSEGenerator.create_sse_stream to wrap the source.
        # SSEGenerator.create_sse_stream handles various types of stream_generator internally.
        final_generator = SSEGenerator.create_sse_stream(source=stream_generator, cleanup_func=cleanup_func)

        # Standard SSE headers for the HTTP response itself
        # These will be stored in the Result object. Rust side decides how to use them.
        standard_sse_headers = {
            "Cache-Control": "no-cache",  # SSE specific
            "Connection": "keep-alive",  # SSE specific
            "X-Accel-Buffering": "no",  # Useful for proxies with SSE
            # Content-Type is implicitly text/event-stream, will be in streaming_data below
        }
        # Caller-supplied headers override the SSE defaults on key collision.
        all_response_headers = standard_sse_headers.copy()
        if headers:
            all_response_headers.update(headers)
    else:
        # For non-SSE streams.
        # If stream_generator is sync, wrap it to be async.
        # If already async or single item, it will be handled.
        # Rust's stream_generator in ToolboxClient seems to handle both sync/async Python generators.
        # For consistency with how SSEGenerator does it, we can wrap sync ones.
        # NOTE: strings are excluded from the iterable check on purpose, so a
        # plain str is streamed as one item rather than character by character.
        # Async generators have no __iter__, so they fall through to isasyncgen.
        if inspect.isgenerator(stream_generator) or \
            (not isinstance(stream_generator, str) and hasattr(stream_generator, '__iter__')):
            final_generator = SSEGenerator.wrap_sync_generator(stream_generator)  # Simple async wrapper
        elif inspect.isasyncgen(stream_generator):
            final_generator = stream_generator
        else:  # Single item or string
            async def _single_item_gen():
                yield stream_generator

            final_generator = _single_item_gen()
        all_response_headers = headers if headers else {}

    # Prepare streaming data to be stored in the Result object
    streaming_data = {
        "type": "stream",  # Indicator for Rust side
        "generator": final_generator,
        "content_type": content_type,  # Let Rust know the intended content type
        "headers": all_response_headers  # Intended HTTP headers for the overall response
    }

    result_payload = ToolBoxResult(
        data_to=interface,
        data=streaming_data,
        data_info="Streaming response" if content_type != "text/event-stream" else "SSE Event Stream",
        data_type="stream"  # Generic type for Rust to identify it needs to stream from 'generator'
    )

    return cls(error=error, info=info_obj, result=result_payload)
text(text_data, content_type='text/plain', exec_code=None, status=200, info='OK', interface=ToolBoxInterfaces.remote, headers=None) classmethod

Create a text response Result with specific content type.

Source code in toolboxv2/utils/system/types.py
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
@classmethod
def text(cls, text_data, content_type="text/plain",exec_code=None,status=200, info="OK", interface=ToolBoxInterfaces.remote, headers=None):
    """Create a text response Result with a specific content type.

    When *headers* are supplied the request is delegated to :meth:`html`,
    which is the path that carries custom HTTP headers.
    """
    if headers is not None:
        return cls.html(text_data, status=exec_code or status, info=info, headers=headers)

    code = exec_code or status
    return cls(
        error=ToolBoxError.none,
        info=ToolBoxInfo(exec_code=code, help_text=info),
        result=ToolBoxResult(
            data_to=interface,
            data=text_data,
            data_info="Text response",
            data_type=content_type,
        ),
    )
all_functions_enums

Automatically generated by ToolBox v = 0.1.22

api
build_cargo_project(debug=False)

Build the Cargo project, optionally in debug mode.

Source code in toolboxv2/utils/system/api.py
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
def build_cargo_project(debug=False):
    """Compile the src-core Cargo project and copy the binary into bin/.

    Args:
        debug: When True, build without --release.

    Returns:
        bool: True when the cargo build succeeded, False otherwise.
    """
    build_args = ["cargo", "build"] + ([] if debug else ["--release"])
    print(f"Building in {'debug' if debug else 'release'} mode...")
    try:
        subprocess.run(build_args, cwd=os.path.join(".", "src-core"), check=True)
    except subprocess.CalledProcessError as e:
        print(f"Cargo build failed: {e}")
        return False

    exe_name = get_executable_name_with_extension()
    if exe_name:
        target_dir = tb_root_dir / "bin"
        target_dir.mkdir(exist_ok=True)
        exe_file = Path(exe_name)
        try:
            shutil.copy(exe_file, target_dir / exe_file.name)
        except Exception:
            # bin/ not writable (or file locked): fall back to ubin/ and
            # replace any stale copy there.
            target_dir = tb_root_dir / "ubin"
            target_dir.mkdir(exist_ok=True)
            (target_dir / exe_file.name).unlink(missing_ok=True)
            try:
                shutil.copy(exe_file, target_dir / exe_file.name)
            except Exception as copy_err:
                print(f"Failed to copy executable: {copy_err}")
        print(f"Copied executable to '{target_dir.resolve()}'")
    return True
check_cargo_installed()

Check if Cargo (Rust package manager) is installed on the system.

Source code in toolboxv2/utils/system/api.py
164
165
166
167
168
169
170
def check_cargo_installed():
    """Return True when the `cargo` binary is runnable on this system."""
    try:
        subprocess.run(["cargo", "--version"], check=True, capture_output=True)
    except Exception:
        return False
    return True
cleanup_build_files()

Cleans up build files.

Source code in toolboxv2/utils/system/api.py
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
def cleanup_build_files():
    """Remove Cargo build artifacts under src-core/target.

    Tries `cargo clean` first and falls back to deleting the target
    subdirectories by hand when that fails.

    Returns:
        bool: True when nothing was left to clean or cleanup succeeded.
    """
    src_core_path = os.path.join(".", "src-core")
    target_path = os.path.join(src_core_path, "target")

    if not os.path.exists(target_path):
        print(f"Build directory {target_path} not found")
        return True

    try:
        print(f"Cleaning up build files in {target_path}...")
        # First try using cargo clean
        try:
            subprocess.run(["cargo", "clean"], cwd=src_core_path, check=True)
            print("Successfully cleaned up build files with cargo clean")
        except subprocess.CalledProcessError:
            # If cargo clean fails, manually remove directories
            print("Cargo clean failed, manually removing build directories...")
            for entry in os.listdir(target_path):
                entry_path = os.path.join(target_path, entry)
                if os.path.isdir(entry_path) and entry != ".rustc_info.json":
                    shutil.rmtree(entry_path)
                    print(f"Removed {entry_path}")
        return True
    except Exception as e:
        print(f"Failed to clean up build files: {e}")
        return False
detect_os_and_arch()

Detect the current operating system and architecture.

Source code in toolboxv2/utils/system/api.py
102
103
104
105
106
def detect_os_and_arch():
    """Report the host platform as a lowercase (os_name, architecture) pair.

    Returns:
        tuple[str, str]: e.g. ('linux', 'x86_64') or ('windows', 'amd64').
    """
    return platform.system().lower(), platform.machine().lower()
download_executable(url, file_name)

Attempt to download the executable from the provided URL.

Source code in toolboxv2/utils/system/api.py
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
def download_executable(url, file_name):
    """Stream the executable at *url* into *file_name*.

    On non-Windows hosts the downloaded file is marked executable.

    Returns:
        str | None: the file name on success, None when the request failed.
    """
    try:
        import requests
    except ImportError:
        print("The 'requests' library is required. Please install it via pip install requests")
        sys.exit(1)

    print(f"Attempting to download executable from {url}...")
    try:
        response = requests.get(url, stream=True)
    except Exception as e:
        print(f"Download error: {e}")
        return None

    if response.status_code != 200:
        print("Download failed. Status code:", response.status_code)
        return None

    with open(file_name, "wb") as out_file:
        for chunk in response.iter_content(chunk_size=8192):
            if chunk:
                out_file.write(chunk)
    # Make the file executable on non-Windows systems
    if platform.system().lower() != "windows":
        os.chmod(file_name, 0o755)
    return file_name
find_highest_zip_version(name_filter, app_version=None, root_dir='mods_sto', version_only=False)

Findet die höchste verfügbare ZIP-Version in einem Verzeichnis basierend auf einem Namensfilter.

Parameters:

Name Type Description Default
root_dir str

Wurzelverzeichnis für die Suche

'mods_sto'
name_filter str

Namensfilter für die ZIP-Dateien

required
app_version str

Aktuelle App-Version für Kompatibilitätsprüfung

None

Returns:

Name Type Description
str str

Pfad zur ZIP-Datei mit der höchsten Version oder None wenn keine gefunden

Source code in toolboxv2/utils/system/api.py
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
def find_highest_zip_version(name_filter: str, app_version: str = None, root_dir: str = "mods_sto", version_only=False) -> str:
    """
    Find the highest available ZIP version in a directory based on a name filter.

    Args:
        name_filter (str): Name filter for the ZIP files
        app_version (str, optional): Current app version for compatibility checks;
            files built for a newer app version are skipped.
        root_dir (str): Root directory for the search
        version_only (bool): Return only the version string instead of the path.

    Returns:
        str: Path to the ZIP file with the highest version (or its version
        string when version_only=True), or None when no match was found.
    """
    # File names look like "...RST$<name>&v<app_ver>§<zip_ver>.zip"
    pattern = fr"{name_filter}&v[0-9.]+§([0-9.]+)\.zip$"

    highest_version = None
    highest_version_file = None

    root_path = Path(root_dir)
    for file_path in root_path.rglob("*.zip"):
        if "RST$" + name_filter not in str(file_path):
            continue
        match = re.search(pattern, str(file_path).split("RST$")[-1].strip())
        if not match:
            continue
        zip_version = match.group(1)

        # Check app-version compatibility when requested
        if app_version:
            file_app_version = re.search(r"&v([0-9.]+)§", str(file_path)).group(1)
            if version.parse(file_app_version) > version.parse(app_version):
                continue

        # Keep the candidate with the highest zip version
        current_version = version.parse(zip_version)
        if highest_version is None or current_version > highest_version:
            highest_version = current_version
            highest_version_file = str(file_path)

    if version_only:
        # Bug fix: previously str(None) produced the literal string "None"
        # when nothing matched; return None instead so callers can test it.
        return str(highest_version) if highest_version is not None else None
    return highest_version_file
find_highest_zip_version_entry(name, target_app_version=None, filepath='tbState.yaml')

Findet den Eintrag mit der höchsten ZIP-Version für einen gegebenen Namen und eine optionale Ziel-App-Version in einer YAML-Datei.

:param name: Der Name des gesuchten Eintrags. :param target_app_version: Die Zielversion der App als String (optional). :param filepath: Der Pfad zur YAML-Datei. :return: Den Eintrag mit der höchsten ZIP-Version innerhalb der Ziel-App-Version oder None, falls nicht gefunden.

Source code in toolboxv2/utils/system/api.py
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
def find_highest_zip_version_entry(name, target_app_version=None, filepath='tbState.yaml'):
    """
    Find the entry with the highest ZIP version for a given name and an
    optional target app version in a YAML state file.

    :param name: Substring to match against the entry keys.
    :param target_app_version: Target app version string (optional).
    :param filepath: Path to the YAML file.
    :return: The matching entry with the highest ZIP version, or an empty
        dict when no entry matched.
    """
    import yaml
    highest_zip_ver = None
    highest_entry = {}

    with open(filepath) as file:
        data = yaml.safe_load(file)
        # print(data)
        app_ver_h = None
        for key, value in list(data.get('installable', {}).items())[::-1]:
            # Match entries whose key contains the requested name

            if name in key:
                v = value['version']
                if len(v) == 1:
                    # Single-element version list: only an app version is
                    # present, so the zip version defaults to 0.0.0.
                    app_ver = v[0].split('v')[-1]
                    zip_ver = "0.0.0"
                else:
                    app_ver, zip_ver = v
                    app_ver = app_ver.split('v')[-1]
                app_ver = version.parse(app_ver)
                # When a target app version is given, only exact matches count
                if target_app_version is None or app_ver == version.parse(target_app_version):
                    current_zip_ver = version.parse(zip_ver)
                    # print(current_zip_ver, highest_zip_ver)

                    if highest_zip_ver is None or current_zip_ver > highest_zip_ver:
                        highest_zip_ver = current_zip_ver
                        highest_entry = value

                    # NOTE(review): this branch overrides the zip-version winner
                    # whenever a newer app version appears — presumably a newer
                    # app version takes precedence over a higher zip version,
                    # but confirm that this double-selection is intended.
                    if app_ver_h is None or app_ver > app_ver_h:
                        app_ver_h = app_ver
                        highest_zip_ver = current_zip_ver
                        highest_entry = value
    return highest_entry
get_executable_path()

Find the release executable in standard locations.

Source code in toolboxv2/utils/system/api.py
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
def get_executable_path():
    """Find the release executable in standard locations.

    Probes bin/, src-core/, the toolbox root, and the cargo release target
    directory, in that order.

    Returns:
        pathlib.Path | None: resolved path of the first existing regular
        file, or None when no candidate exists.
    """
    # This function is simplified from your example to match this script's scope
    exe_name = get_executable_name_with_extension()
    from toolboxv2 import tb_root_dir
    search_paths = [
        tb_root_dir / Path("bin") / exe_name,
        tb_root_dir / Path("src-core") / exe_name,
        tb_root_dir / exe_name,
        tb_root_dir / Path("src-core") / "target" / "release" / exe_name,
    ]
    for path in search_paths:
        # Fix: removed leftover debug print() of every candidate path.
        if path.exists() and path.is_file():
            return path.resolve()
    return None
query_executable_url(current_os, machine)

Query a remote URL for a matching executable based on OS and architecture. The file name is built dynamically based on parameters.

Source code in toolboxv2/utils/system/api.py
109
110
111
112
113
114
115
116
117
118
119
120
121
def query_executable_url(current_os, machine):
    """
    Query a remote URL for a matching executable based on OS and architecture.
    The file name is built dynamically based on parameters.
    """
    base_url = "https://example.com/downloads"  # Replace with the actual URL
    # Windows executables carry an .exe extension; other platforms have none.
    suffix = ".exe" if current_os == "windows" else ""
    file_name = f"server_{current_os}_{machine}{suffix}"
    return f"{base_url}/{file_name}", file_name
remove_release_executable()

Removes the release executable.

Source code in toolboxv2/utils/system/api.py
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
def remove_release_executable():
    """Delete the built server executable from src-core and target/release.

    Failures to delete are reported but never abort the cleanup.

    Returns:
        bool: always True.
    """
    src_core_path = os.path.join(".", "src-core")
    expected_name = "simple-core-server.exe" if platform.system().lower() == "windows" else "simple-core-server"

    candidates = (
        os.path.join(src_core_path, expected_name),
        os.path.join(src_core_path, "target", "release", expected_name),
    )
    for candidate in candidates:
        if not os.path.exists(candidate):
            continue
        try:
            os.remove(candidate)
            print(f"Removed release executable: {candidate}")
        except Exception as e:
            print(f"Failed to remove {candidate}: {e}")

    return True
run_executable(file_path)

Run the executable file.

Source code in toolboxv2/utils/system/api.py
153
154
155
156
157
158
159
160
161
def run_executable(file_path):
    """Launch the executable at *file_path* and block until it exits.

    Failures and Ctrl-C are reported on stdout rather than raised.
    """
    try:
        print("Running it.")
        target = os.path.abspath(file_path)
        subprocess.run([target], check=True)
    except subprocess.CalledProcessError as e:
        print(f"Failed to execute {file_path}: {e}")
    except KeyboardInterrupt:
        print("Exiting call from:", file_path)
run_in_debug_mode()

Run the Cargo project in debug mode.

Source code in toolboxv2/utils/system/api.py
233
234
235
236
237
238
239
240
241
242
def run_in_debug_mode():
    """Run the Cargo project in debug mode.

    Returns:
        bool: True when the run completed successfully, False on failure.
    """
    src_core_path = os.path.join(".", "src-core")
    print("Running in debug mode...")
    try:
        # Fix: without check=True subprocess.run never raises
        # CalledProcessError, so the except branch was unreachable and a
        # failing `cargo run` still reported success.
        subprocess.run(["cargo", "run"], cwd=src_core_path, check=True)
        return True
    except subprocess.CalledProcessError as e:
        print(f"Debug execution failed: {e}")
        return False
    except FileNotFoundError as e:
        # cargo not installed or the src-core directory is missing.
        print(f"Debug execution failed: {e}")
        return False
run_with_hot_reload()

Run the Cargo project with hot reloading.

Source code in toolboxv2/utils/system/api.py
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
def run_with_hot_reload():
    """Run the Cargo project with hot reloading via cargo-watch.

    Installs cargo-watch on demand; falls back to a plain debug run when
    the install fails.

    Returns:
        bool: True when the watch run completed successfully, False otherwise.
    """
    src_core_path = os.path.join(".", "src-core")

    # Check if cargo-watch is installed
    try:
        subprocess.run(["cargo", "watch", "--version"], check=True, capture_output=True)
    except Exception:
        print("cargo-watch is not installed. Installing now...")
        try:
            subprocess.run(["cargo", "install", "cargo-watch"], check=True)
        except subprocess.CalledProcessError as e:
            print(f"Failed to install cargo-watch: {e}")
            print("Running without hot reload")
            return run_in_debug_mode()

    print("Running with hot reload in debug mode...")
    try:
        # Fix: without check=True the CalledProcessError handler below was
        # unreachable; also catch FileNotFoundError (missing cargo/src-core).
        subprocess.run(["cargo", "watch", "-x", "run"], cwd=src_core_path, check=True)
        return True
    except (subprocess.CalledProcessError, FileNotFoundError) as e:
        print(f"Hot reload execution failed: {e}")
        return False
    except KeyboardInterrupt:
        print("Exiting hot reload: KeyboardInterrupt")
        return False
update_server(new_executable_path, new_version, use_posix_zdt)

High-level update function, calls platform-specific logic.

Source code in toolboxv2/utils/system/api.py
482
483
484
485
486
487
488
489
490
491
def update_server(new_executable_path: str, new_version: str, use_posix_zdt: bool):
    """Dispatch to the platform-appropriate server update strategy.

    POSIX zero-downtime updates are only possible off Windows; everywhere
    else (or when the flag is off) a graceful restart is used.
    """
    on_posix = platform.system().lower() != "windows"
    if use_posix_zdt and on_posix:
        return update_server_posix(new_executable_path, new_version)
    if use_posix_zdt and not on_posix:
        print(Style.YELLOW("Warning: --posix-zdt flag ignored on Windows. Using graceful restart."))
    return update_server_graceful_restart(new_executable_path, new_version)
conda_runner
create_env_registry(env_name)

Create a JSON registry of all packages installed in the specified conda environment.

Args: env_name (str): Name of the conda environment

Returns: bool: True if registry creation was successful, False otherwise

Source code in toolboxv2/utils/system/conda_runner.py
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
def create_env_registry(env_name: str) -> bool:
    """
    Create a JSON registry of all packages installed in the specified conda environment.

    Args:
    env_name (str): Name of the conda environment

    Returns:
    bool: True if registry creation was successful, False otherwise
    """
    # Ask conda for the environment's package list in JSON form.
    success, output = run_command(f"conda list -n {env_name} --json", live=False)
    if not success or output is None:
        print(f"Failed to get package list for environment {env_name}")
        return False

    try:
        packages = json.loads(output)
    except json.JSONDecodeError:
        print(f"Failed to parse package list for environment {env_name}")
        return False

    # Keep only name/version per package for the registry file.
    registry = [{"name": pkg["name"], "version": pkg["version"]} for pkg in packages]
    registry_file = f"{env_name}_registry.json"
    try:
        with open(registry_file, 'w') as f:
            json.dump(registry, f, indent=2)
    except OSError:
        print(f"Failed to write registry file for environment {env_name}")
        return False

    print(f"Registry created successfully: {registry_file}")
    return True
db_cli_manager
ClusterManager

Manages a cluster of r_blob_db instances defined in a config file.

Source code in toolboxv2/utils/system/db_cli_manager.py
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
class ClusterManager:
    """Manages a cluster of r_blob_db instances defined in a config file.

    The config file is a JSON object mapping instance IDs to per-instance
    settings (``port``, ``data_dir`` and optionally ``host``); each entry is
    wrapped in a DBInstanceManager.
    """

    def __init__(self, config_path: str = CLUSTER_CONFIG_FILE):
        # Config is loaded eagerly so every public method can assume
        # `self.instances` is populated.
        self.config_path = Path(config_path)
        self.instances: dict[str, DBInstanceManager] = self._load_config()

    def _load_config(self) -> dict[str, DBInstanceManager]:
        """Loads and validates the cluster configuration.

        Falls back to a two-instance default when the file is missing (the
        default is written to disk) or contains invalid JSON (not written).
        """
        from toolboxv2 import tb_root_dir
        # Relative config paths are resolved against the toolbox root, not CWD.
        if not self.config_path.is_absolute():
            self.config_path = tb_root_dir / self.config_path

        default_config_dir = (tb_root_dir / ".data/db_data/").resolve()
        default_config = {
            "instance-01": {"port": 3001, "data_dir": str(default_config_dir / "01")},
            "instance-02": {"port": 3002, "data_dir": str(default_config_dir / "02")},
        }

        if not self.config_path.exists():
            print(Style.YELLOW(f"Warning: Cluster config '{self.config_path}' not found. Creating a default example."))

            # Persist the default so the next run finds a real config file.
            with open(self.config_path, 'w') as f:
                json.dump(default_config, f, indent=4)
            config_data = default_config
        else:
            try:
                with open(self.config_path) as f:
                    config_data = json.load(f)
            except json.JSONDecodeError:
                print(Style.RED(f"Error: Cluster config '{self.config_path}' is not valid JSON. using default config."))
                config_data = default_config

        # NOTE(review): `id` shadows the builtin inside this comprehension.
        return {id: DBInstanceManager(id, cfg) for id, cfg in config_data.items()}

    def get_instances(self, instance_id: str | None = None) -> list[DBInstanceManager]:
        """Returns a list of instances to operate on."""
        # A specific ID narrows the target to that single instance.
        if instance_id:
            if instance_id not in self.instances:
                raise ValueError(f"Instance ID '{instance_id}' not found in '{self.config_path}'.")
            return [self.instances[instance_id]]
        return list(self.instances.values())

    def start_all(self, executable_path: Path, version: str, instance_id: str | None = None):
        """Start the selected instance (or every instance) with the given executable."""
        for instance in self.get_instances(instance_id):
            instance.start(executable_path, version)

    def stop_all(self, instance_id: str | None = None):
        """Stop the selected instance (or every instance)."""
        for instance in self.get_instances(instance_id):
            instance.stop()

    def status_all(self, instance_id: str | None = None, silent=False):
        """Print a status table (unless ``silent``) and return
        ``(services_online, server_list)`` where ``server_list`` holds the
        base URLs of running instances."""
        if not silent:
            header = f"--- {Style.Bold('Cluster Status')} ---"
            print(header)
            print(
                f"{Style.Underline('INSTANCE ID'):<18} {Style.Underline('STATUS'):<20} {Style.Underline('PID'):<8} {Style.Underline('VERSION'):<12} {Style.Underline('PORT')}")

        services_online = 0
        server_list = []
        for instance in self.get_instances(instance_id):
            pid, version = instance.read_state()
            is_running = instance.is_running()
            if is_running:
                server_list.append(f"http://{instance.host}:{instance.port}")
                services_online += 1
            if not silent:
                status_str = "✅ RUNNING" if is_running else "❌ STOPPED"
                status_color = Style.GREEN2 if is_running else Style.RED2
                print(
                    f"  {Style.WHITE(instance.id):<16} {status_color(status_str):<20} {Style.GREY(str(pid or 'N/A')):<8} {Style.BLUE2(version or 'N/A'):<12} {Style.YELLOW(str(instance.port))}"
                )
        if not silent:
            # NOTE(review): len(header) counts ANSI escape characters too, so
            # the rule may be wider than the visible header — confirm intended.
            print("-" * len(header))
        return services_online, server_list

    def health_check_all(self, instance_id: str | None = None):
        """Query each selected instance's /health endpoint and print a table."""
        header = f"--- {Style.Bold('Cluster Health Check')} ---"
        print(header)
        print(
            f"{Style.Underline('INSTANCE ID'):<18} {Style.Underline('STATUS'):<22} {Style.Underline('PID'):<8} {Style.Underline('LATENCY'):<12} {Style.Underline('DETAILS')}")

        for instance in self.get_instances(instance_id):
            health = instance.get_health()
            status = health.get('status', 'UNKNOWN')
            pid = health.get('pid', 'N/A')
            details = ""

            # Three display states: healthy, cleanly stopped, or anything else
            # (unreachable / parse error) shown with the error detail.
            if status == 'OK':
                status_str, color = "✅ OK", Style.GREEN2
                latency = f"{health['latency_ms']}ms"
                details = f"Blobs: {Style.YELLOW(str(health['blobs_managed']))} | Version: {Style.BLUE2(health['server_version'])}"
            elif status == 'STOPPED':
                status_str, color = "❌ STOPPED", Style.RED2
                latency = "N/A"
            else:
                status_str, color = f"🔥 {status}", Style.RED
                latency = "N/A"
                details = Style.GREY(str(health.get('error', 'N/A')))

            print(
                f"  {Style.WHITE(instance.id):<16} {color(status_str):<22} {Style.GREY(str(pid)):<8} {Style.GREEN(latency):<12} {details}")
        print("-" * len(header))

    def update_all_rolling(self, new_executable_path: Path, new_version: str, instance_id: str | None = None):
        """Performs a zero-downtime rolling update of the cluster.

        Instances are updated one at a time: stop, start with the new
        executable, then poll /health. Any failure aborts the remainder of
        the rollout, possibly leaving the cluster partially updated.
        """
        print(f"--- {Style.Bold(f'Starting Rolling Update to Version {Style.YELLOW(new_version)}')} ---")
        instances_to_update = self.get_instances(instance_id)
        for i, instance in enumerate(instances_to_update):
            print(
                f"\n{Style.CYAN(f'[{i + 1}/{len(instances_to_update)}] Updating instance')} '{Style.WHITE(instance.id)}'...")

            if not instance.stop():
                print(Style.RED2(f"CRITICAL: Failed to stop old instance '{instance.id}'. Aborting update."))
                return

            if not instance.start(new_executable_path, new_version):
                print(Style.RED2(f"CRITICAL: Failed to start new version for '{instance.id}'. Update halted."))
                print(Style.YELLOW("The cluster might be in a partially updated state. Please investigate."))
                return

            # Poll up to 5 times (2 s apart) for the new process to report
            # healthy; the for/else fires only when no attempt succeeded.
            with Spinner(f"Waiting for '{instance.id}' to become healthy", symbols="t") as s:
                for attempt in range(5):
                    s.message = f"Waiting for '{instance.id}' to become healthy (attempt {attempt + 1}/5)"
                    time.sleep(2)
                    health = instance.get_health()
                    if health.get('status') == 'OK':
                        print(
                            f"\n{Style.GREEN('✅ Instance')} '{instance.id}' {Style.GREEN('is healthy with new version.')}")
                        break
                else:
                    print(
                        f"\n{Style.RED2('CRITICAL:')} Instance '{instance.id}' did not become healthy after update. Update halted.")
                    return

        print(f"\n--- {Style.GREEN2('Rolling Update Complete')} ---")
get_instances(instance_id=None)

Returns a list of instances to operate on.

Source code in toolboxv2/utils/system/db_cli_manager.py
225
226
227
228
229
230
231
def get_instances(self, instance_id: str | None = None) -> list[DBInstanceManager]:
    """Returns a list of instances to operate on."""
    # No (or empty) ID means: operate on every configured instance.
    if not instance_id:
        return list(self.instances.values())
    if instance_id not in self.instances:
        raise ValueError(f"Instance ID '{instance_id}' not found in '{self.config_path}'.")
    return [self.instances[instance_id]]
update_all_rolling(new_executable_path, new_version, instance_id=None)

Performs a zero-downtime rolling update of the cluster.

Source code in toolboxv2/utils/system/db_cli_manager.py
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
def update_all_rolling(self, new_executable_path: Path, new_version: str, instance_id: str | None = None):
    """Performs a zero-downtime rolling update of the cluster.

    Each targeted instance is stopped, restarted with the new executable,
    and then health-polled before the next one is touched. Any failure
    aborts the rollout, possibly leaving the cluster partially updated.
    """
    print(f"--- {Style.Bold(f'Starting Rolling Update to Version {Style.YELLOW(new_version)}')} ---")
    instances_to_update = self.get_instances(instance_id)
    for i, instance in enumerate(instances_to_update):
        print(
            f"\n{Style.CYAN(f'[{i + 1}/{len(instances_to_update)}] Updating instance')} '{Style.WHITE(instance.id)}'...")

        if not instance.stop():
            print(Style.RED2(f"CRITICAL: Failed to stop old instance '{instance.id}'. Aborting update."))
            return

        if not instance.start(new_executable_path, new_version):
            print(Style.RED2(f"CRITICAL: Failed to start new version for '{instance.id}'. Update halted."))
            print(Style.YELLOW("The cluster might be in a partially updated state. Please investigate."))
            return

        # Poll up to 5 times, 2 s apart. The loop's `else` clause runs only
        # when no attempt reached 'OK' (i.e. the `break` never fired).
        with Spinner(f"Waiting for '{instance.id}' to become healthy", symbols="t") as s:
            for attempt in range(5):
                s.message = f"Waiting for '{instance.id}' to become healthy (attempt {attempt + 1}/5)"
                time.sleep(2)
                health = instance.get_health()
                if health.get('status') == 'OK':
                    print(
                        f"\n{Style.GREEN('✅ Instance')} '{instance.id}' {Style.GREEN('is healthy with new version.')}")
                    break
            else:
                print(
                    f"\n{Style.RED2('CRITICAL:')} Instance '{instance.id}' did not become healthy after update. Update halted.")
                return

    print(f"\n--- {Style.GREEN2('Rolling Update Complete')} ---")
DBInstanceManager

Manages a single r_blob_db instance.

Source code in toolboxv2/utils/system/db_cli_manager.py
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
class DBInstanceManager:
    """Manages a single r_blob_db instance.

    State (PID + version) is persisted as JSON inside the instance's data
    directory so a later manager process can find and control the instance.
    """

    def __init__(self, instance_id: str, config: dict):
        # `config` requires 'port' and 'data_dir'; 'host' is optional.
        self.id = instance_id
        self.port = config['port']
        self.host = config.get('host', '127.0.0.1')
        self.data_dir = Path(config['data_dir'])
        self.state_file = self.data_dir / "instance_state.json"
        self.log_file = self.data_dir / "instance.log"  # Added for better logging info

    def read_state(self) -> tuple[int | None, str | None]:
        """Reads the PID and version from the instance's state file."""
        if not self.state_file.exists():
            return None, None
        try:
            with open(self.state_file) as f:
                state = json.load(f)
            return state.get('pid'), state.get('version')
        except (json.JSONDecodeError, ValueError, FileNotFoundError):
            # A corrupt or concurrently-removed state file is treated the
            # same as "no state".
            return None, None

    def write_state(self, pid: int | None, version: str | None):
        """Writes the PID and version to the state file."""
        self.data_dir.mkdir(parents=True, exist_ok=True)
        state = {'pid': pid, 'version': version}
        with open(self.state_file, 'w') as f:
            json.dump(state, f, indent=4)

    def is_running(self) -> bool:
        """Checks if the process associated with this instance is running."""
        pid, _ = self.read_state()
        return psutil.pid_exists(pid) if pid else False

    def start(self, executable_path: Path, version: str) -> bool:
        """Starts the instance process and detaches, redirecting output to a log file."""
        if self.is_running():
            print(Style.YELLOW(f"Instance '{self.id}' is already running."))
            return True

        print(Style.CYAN(f"🚀 Starting instance '{self.id}' on port {self.port}..."))
        self.data_dir.mkdir(parents=True, exist_ok=True)
        # NOTE(review): this handle is only closed on the exception path; on
        # success (and on the failed-poll path) it stays open — confirm intended.
        log_handle = open(self.log_file, 'a')

        # The child process is configured entirely through environment variables.
        env = os.environ.copy()
        env["R_BLOB_DB_CLEAN"] = os.getenv("R_BLOB_DB_CLEAN", "false")
        env["R_BLOB_DB_PORT"] = str(self.port)
        env["R_BLOB_DB_DATA_DIR"] = str(self.data_dir.resolve())
        env["RUST_LOG"] = "info,tower_http=debug" # "error"

        try:
            if executable_path is None:
                raise ValueError(f"\n{Style.RED2('❌ ERROR:')} Executable not found. Build it first.")
            with Spinner(f"Launching process for '{self.id}'", symbols="d"):
                process = subprocess.Popen(
                    [str(executable_path.resolve())],
                    env=env,
                    stdout=log_handle,
                    stderr=log_handle,
                    # Detach from the console on Windows; no-op elsewhere.
                    creationflags=subprocess.DETACHED_PROCESS if platform.system() == "Windows" else 0
                )
                # Give the process a moment so an immediate crash is caught below.
                time.sleep(1.5)

            # poll() is non-None only if the process already exited.
            if process.poll() is not None:
                print(f"\n{Style.RED2('❌ ERROR:')} Instance '{self.id}' failed to start. Check logs:")
                print(f"    {Style.GREY(self.log_file)}")
                return False

            self.write_state(process.pid, version)
            print(
                f"\n{Style.GREEN2('✅ Instance')} '{Style.Bold(self.id)}' {Style.GREEN2('started successfully.')} {Style.GREY(f'(PID: {process.pid})')}")
            print(f"   {Style.BLUE('Logging to:')} {Style.GREY(self.log_file)}")
            return True
        except Exception as e:
            print(f"\n{Style.RED2('❌ ERROR:')} Failed to launch instance '{self.id}': {e}")
            log_handle.close()
            return False

    def stop(self, timeout: int = 10) -> bool:
        """Stops the instance process gracefully.

        Terminates and waits up to ``timeout`` seconds, escalating to a hard
        kill on timeout, then clears the persisted state.
        """
        if not self.is_running():
            print(Style.YELLOW(f"Instance '{self.id}' is not running."))
            # Clear any stale state left behind by a dead process.
            self.write_state(None, None)
            return True

        pid, _ = self.read_state()
        with Spinner(f"Stopping '{self.id}' (PID: {pid})", symbols="+", time_in_s=timeout, count_down=True) as s:
            try:
                proc = psutil.Process(pid)
                proc.terminate()
                proc.wait(timeout)
            except psutil.TimeoutExpired:
                # Graceful shutdown took too long — force-kill.
                s.message = f"Force killing '{self.id}'"
                proc.kill()
            except psutil.NoSuchProcess:
                # Process died between is_running() and Process(pid).
                pass
            except Exception as e:
                print(f"\n{Style.RED2('❌ ERROR:')} Failed to stop instance '{self.id}': {e}")
                return False

        self.write_state(None, None)
        print(f"\n{Style.VIOLET2('⏹️  Instance')} '{Style.Bold(self.id)}' {Style.VIOLET2('stopped.')}")
        return True

    def get_health(self) -> dict:
        """Performs a health check on the running instance.

        Returns a dict whose 'status' is 'OK', 'STOPPED', 'UNREACHABLE' or
        'ERROR'; on success the server's own health payload is merged in.
        """
        if not self.is_running():
            return {'id': self.id, 'status': 'STOPPED', 'error': 'Process not running'}

        pid, version = self.read_state()
        health_url = f"http://{self.host}:{self.port}/health"
        start_time = time.monotonic()
        try:
            response = requests.get(health_url, timeout=2)
            latency_ms = (time.monotonic() - start_time) * 1000
            response.raise_for_status()
            health_data = response.json()
            # Rename the server's 'version' key so it cannot collide with the
            # version recorded by this manager.
            health_data.update({
                'id': self.id, 'pid': pid, 'latency_ms': round(latency_ms),
                'server_version': health_data.pop('version', 'unknown'),
                'manager_known_version': version
            })
            return health_data
        except requests.exceptions.RequestException as e:
            return {'id': self.id, 'status': 'UNREACHABLE', 'pid': pid, 'error': str(e)}
        except Exception as e:
            return {'id': self.id, 'status': 'ERROR', 'pid': pid, 'error': f'Failed to parse health response: {e}'}
get_health()

Performs a health check on the running instance.

Source code in toolboxv2/utils/system/db_cli_manager.py
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
def get_health(self) -> dict:
    """Performs a health check on the running instance."""
    # A dead process needs no HTTP round-trip.
    if not self.is_running():
        return {'id': self.id, 'status': 'STOPPED', 'error': 'Process not running'}

    pid, version = self.read_state()
    url = f"http://{self.host}:{self.port}/health"
    started = time.monotonic()
    try:
        response = requests.get(url, timeout=2)
        elapsed_ms = (time.monotonic() - started) * 1000
        response.raise_for_status()
        payload = response.json()
        # Fold manager-side metadata into the server's own health payload,
        # renaming its 'version' key to avoid a collision.
        payload.update({
            'id': self.id, 'pid': pid, 'latency_ms': round(elapsed_ms),
            'server_version': payload.pop('version', 'unknown'),
            'manager_known_version': version
        })
        return payload
    except requests.exceptions.RequestException as e:
        return {'id': self.id, 'status': 'UNREACHABLE', 'pid': pid, 'error': str(e)}
    except Exception as e:
        return {'id': self.id, 'status': 'ERROR', 'pid': pid, 'error': f'Failed to parse health response: {e}'}
is_running()

Checks if the process associated with this instance is running.

Source code in toolboxv2/utils/system/db_cli_manager.py
90
91
92
93
def is_running(self) -> bool:
    """Checks if the process associated with this instance is running."""
    pid, _ = self.read_state()
    # No recorded PID means there is nothing to check.
    if not pid:
        return False
    return psutil.pid_exists(pid)
read_state()

Reads the PID and version from the instance's state file.

Source code in toolboxv2/utils/system/db_cli_manager.py
72
73
74
75
76
77
78
79
80
81
def read_state(self) -> tuple[int | None, str | None]:
    """Reads the PID and version from the instance's state file."""
    if not self.state_file.exists():
        return None, None
    try:
        with open(self.state_file) as f:
            state = json.load(f)
        return state.get('pid'), state.get('version')
    except (json.JSONDecodeError, ValueError, FileNotFoundError):
        return None, None
start(executable_path, version)

Starts the instance process and detaches, redirecting output to a log file.

Source code in toolboxv2/utils/system/db_cli_manager.py
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
def start(self, executable_path: Path, version: str) -> bool:
    """Starts the instance process and detaches, redirecting output to a log file.

    Args:
        executable_path: Path to the server binary; a ``None`` value is
            reported as a launch failure.
        version: Version string recorded in the instance state file.

    Returns:
        bool: True if the instance is (already) running, False on failure.
    """
    if self.is_running():
        print(Style.YELLOW(f"Instance '{self.id}' is already running."))
        return True

    print(Style.CYAN(f"🚀 Starting instance '{self.id}' on port {self.port}..."))
    self.data_dir.mkdir(parents=True, exist_ok=True)
    log_handle = open(self.log_file, 'a')

    # The child process is configured entirely via environment variables.
    env = os.environ.copy()
    env["R_BLOB_DB_CLEAN"] = os.getenv("R_BLOB_DB_CLEAN", "false")
    env["R_BLOB_DB_PORT"] = str(self.port)
    env["R_BLOB_DB_DATA_DIR"] = str(self.data_dir.resolve())
    env["RUST_LOG"] = "info,tower_http=debug" # "error"

    try:
        if executable_path is None:
            raise ValueError(f"\n{Style.RED2('❌ ERROR:')} Executable not found. Build it first.")
        with Spinner(f"Launching process for '{self.id}'", symbols="d"):
            process = subprocess.Popen(
                [str(executable_path.resolve())],
                env=env,
                stdout=log_handle,
                stderr=log_handle,
                # Detach from the console on Windows; no-op elsewhere.
                creationflags=subprocess.DETACHED_PROCESS if platform.system() == "Windows" else 0
            )
            # Give the process a moment so an immediate crash is caught below.
            time.sleep(1.5)

        # poll() is non-None only if the process already exited.
        if process.poll() is not None:
            print(f"\n{Style.RED2('❌ ERROR:')} Instance '{self.id}' failed to start. Check logs:")
            print(f"    {Style.GREY(self.log_file)}")
            return False

        self.write_state(process.pid, version)
        print(
            f"\n{Style.GREEN2('✅ Instance')} '{Style.Bold(self.id)}' {Style.GREEN2('started successfully.')} {Style.GREY(f'(PID: {process.pid})')}")
        print(f"   {Style.BLUE('Logging to:')} {Style.GREY(self.log_file)}")
        return True
    except Exception as e:
        print(f"\n{Style.RED2('❌ ERROR:')} Failed to launch instance '{self.id}': {e}")
        return False
    finally:
        # Popen duplicates the descriptor into the child, so the parent's
        # handle can always be closed here. Previously it was closed only on
        # the exception path, leaking a file handle per successful start.
        log_handle.close()
stop(timeout=10)

Stops the instance process gracefully.

Source code in toolboxv2/utils/system/db_cli_manager.py
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
def stop(self, timeout: int = 10) -> bool:
    """Stops the instance process gracefully.

    Sends a terminate signal and waits up to ``timeout`` seconds, escalating
    to a hard kill on timeout, then clears the persisted state.

    Returns:
        bool: True if the instance ended up stopped, False on an unexpected error.
    """
    if not self.is_running():
        print(Style.YELLOW(f"Instance '{self.id}' is not running."))
        # Clear any stale state left behind by a dead process.
        self.write_state(None, None)
        return True

    pid, _ = self.read_state()
    with Spinner(f"Stopping '{self.id}' (PID: {pid})", symbols="+", time_in_s=timeout, count_down=True) as s:
        try:
            proc = psutil.Process(pid)
            proc.terminate()
            proc.wait(timeout)
        except psutil.TimeoutExpired:
            # Graceful shutdown took too long — force-kill.
            # NOTE(review): there is no wait() after kill(); state is cleared
            # below regardless — confirm the process is truly gone.
            s.message = f"Force killing '{self.id}'"
            proc.kill()
        except psutil.NoSuchProcess:
            # Process died between the is_running() check and Process(pid).
            pass
        except Exception as e:
            print(f"\n{Style.RED2('❌ ERROR:')} Failed to stop instance '{self.id}': {e}")
            return False

    self.write_state(None, None)
    print(f"\n{Style.VIOLET2('⏹️  Instance')} '{Style.Bold(self.id)}' {Style.VIOLET2('stopped.')}")
    return True
write_state(pid, version)

Writes the PID and version to the state file.

Source code in toolboxv2/utils/system/db_cli_manager.py
83
84
85
86
87
88
def write_state(self, pid: int | None, version: str | None):
    """Writes the PID and version to the state file."""
    self.data_dir.mkdir(parents=True, exist_ok=True)
    state = {'pid': pid, 'version': version}
    with open(self.state_file, 'w') as f:
        json.dump(state, f, indent=4)
cli_db_runner()

The main entry point for the CLI application.

Source code in toolboxv2/utils/system/db_cli_manager.py
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
def cli_db_runner():
    """The main entry point for the CLI application."""
    parser = argparse.ArgumentParser(
        description=f"🚀 {Style.Bold('A manager for r_blob_db instances and clusters.')}",
        formatter_class=argparse.RawTextHelpFormatter
    )
    subparsers = parser.add_subparsers(dest="action", required=True, help="Available actions")

    # Small helpers instead of shared kwargs dicts: each sub-command that
    # accepts these options calls the matching helper.
    def _add_instance_option(sub):
        sub.add_argument('--instance-id', type=str, default=None,
                         help='Target a specific instance ID. If omitted, action applies to the whole cluster.')

    def _add_version_option(sub, required=False):
        sub.add_argument('--version', type=str, default='dev', required=required,
                         help='Specify a version string for the executable (e.g., "1.2.0").')

    # --- Define Commands ---
    p_start = subparsers.add_parser('start', help='Start instance(s).')
    _add_instance_option(p_start)
    _add_version_option(p_start)

    p_stop = subparsers.add_parser('stop', help='Stop instance(s).')
    _add_instance_option(p_stop)

    p_status = subparsers.add_parser('status', help='Show the running status of instance(s).')
    _add_instance_option(p_status)

    p_health = subparsers.add_parser('health', help='Perform a health check on instance(s).')
    _add_instance_option(p_health)

    p_update = subparsers.add_parser('update', help='Perform a rolling update on the cluster.')
    _add_instance_option(p_update)
    _add_version_option(p_update, required=True)

    subparsers.add_parser('build', help='Build the Rust executable from source.')
    subparsers.add_parser('clean', help='Clean the Rust build artifacts.')

    # --- Execute Command ---
    args = parser.parse_args()

    # Build/clean need no cluster manager at all.
    if args.action == 'build':
        handle_build()
        return
    if args.action == 'clean':
        handle_clean()
        return

    manager = ClusterManager()

    # Actions that launch processes need the executable up front.
    if args.action in ['start', 'update']:
        executable_path = get_executable_path(update=(args.action == 'update'))
        if not executable_path:
            print(Style.RED(f"ERROR: Could not find the {EXECUTABLE_NAME} executable."))
            print(Style.YELLOW("Please build it first with: python -m toolboxv2.r_blob_db.db_cli build"))
            return

    if args.action == 'start':
        manager.start_all(executable_path, args.version, args.instance_id)
    elif args.action == 'stop':
        manager.stop_all(args.instance_id)
    elif args.action == 'status':
        manager.status_all(args.instance_id)
    elif args.action == 'health':
        manager.health_check_all(args.instance_id)
    elif args.action == 'update':
        manager.update_all_rolling(executable_path, args.version, args.instance_id)
get_executable_path(base_name=EXECUTABLE_NAME, update=False)

Finds the release executable in standard locations.

Source code in toolboxv2/utils/system/db_cli_manager.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
def get_executable_path(base_name: str = EXECUTABLE_NAME, update=False) -> Path | None:
    """Finds the release executable in standard locations."""
    exe_name = f"{base_name}.exe" if platform.system() == "Windows" else base_name
    from toolboxv2 import tb_root_dir
    candidates = [
        tb_root_dir / "bin" / exe_name,
        tb_root_dir / "r_blob_db" / "target" / "release" / exe_name,
    ]
    # For updates, prefer the freshly built artifact over the installed copy.
    if update:
        candidates.reverse()
    # First existing candidate wins; None when nothing is built/installed.
    return next((p.resolve() for p in candidates if p.is_file()), None)
main_tool
MainTool
Source code in toolboxv2/utils/system/main_tool.py
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
class MainTool:
    """Base class for toolbox mods with a two-stage (sync + async) init:
    ``__init__`` stores arguments, awaiting the instance runs ``__ainit__``."""

    toolID: str = ""  # shared fallback tool id
    # app = None
    interface = None  # shared fallback interface, set on first __ainit__
    spec = "app"
    name = ""
    color = "Bold"
    stuf = False  # when True, print() is suppressed

    def __init__(self, *args, **kwargs):
        """
        Standard constructor used for arguments pass
        Do not override. Use __ainit__ instead
        """
        # Arguments are kept so __initobj can forward them to __ainit__.
        self.__storedargs = args, kwargs
        self.tools = kwargs.get("tool", {})
        self.logger = kwargs.get("logs", get_logger())
        self.color = kwargs.get("color", "WHITE")
        self.todo = kwargs.get("load", kwargs.get("on_start", lambda: None))
        # An on_exit callable is registered through the app's tb decorator.
        if "on_exit" in kwargs and isinstance(kwargs.get("on_exit"), Callable):
            self.on_exit =self.app.tb(
                mod_name=self.name,
                name=kwargs.get("on_exit").__name__,
                version=self.version if hasattr(self, 'version') else "0.0.0",
            )(kwargs.get("on_exit"))
        self.async_initialized = False
        if self.todo:
            try:
                # Coroutine on_start hooks are deferred to __ainit__; only
                # plain callables run here.
                if inspect.iscoroutinefunction(self.todo):
                    pass
                else:
                    self.todo()
                get_logger().info(f"{self.name} on load suspended")
            except Exception as e:
                get_logger().error(f"Error loading mod {self.name} {e}")
                if self.app.debug:
                    import traceback
                    traceback.print_exc()
        else:
            get_logger().info(f"{self.name} no load require")

    async def __ainit__(self, *args, **kwargs):
        """Async init stage; receives the arguments given to ``__init__``.
        Requires 'v' (version) and 'name' keys in ``kwargs``."""
        self.version = kwargs["v"]
        self.tools = kwargs.get("tool", {})
        self.name = kwargs["name"]
        self.logger = kwargs.get("logs", get_logger())
        self.color = kwargs.get("color", "WHITE")
        self.todo = kwargs.get("load", kwargs.get("on_start"))
        if not hasattr(self, 'config'):
            self.config = {}
        self.user = None
        self.description = "A toolbox mod" if kwargs.get("description") is None else kwargs.get("description")
        if MainTool.interface is None:
            MainTool.interface = self.app.interface_type
        # Result.default(self.app.interface)

        if self.todo:
            try:
                # Mirror image of __init__: only coroutine hooks run here.
                if inspect.iscoroutinefunction(self.todo):
                    await self.todo()
                else:
                    pass
                await asyncio.sleep(0.1)
                get_logger().info(f"{self.name} on load suspended")
            except Exception as e:
                get_logger().error(f"Error loading mod {self.name} {e}")
                if self.app.debug:
                    import traceback
                    traceback.print_exc()
        else:
            get_logger().info(f"{self.name} no load require")
        self.app.print(f"TOOL : {self.spec}.{self.name} online")



    @property
    def app(self):
        # The app is resolved on every access rather than stored.
        return get_app(
            from_=f"{self.spec}.{self.name}|{self.toolID if self.toolID else '*' + MainTool.toolID} {self.interface if self.interface else MainTool.interface}")

    @app.setter
    def app(self, v):
        raise PermissionError(f"You cannot set the App Instance! {v=}")

    @staticmethod
    def return_result(error: ToolBoxError = ToolBoxError.none,
                      exec_code: int = 0,
                      help_text: str = "",
                      data_info=None,
                      data=None,
                      data_to=None):
        """Build a Result object, defaulting the destination interface and
        any missing data/info payloads."""
        if data_to is None:
            data_to = MainTool.interface if MainTool.interface is not None else ToolBoxInterfaces.cli

        if data is None:
            data = {}

        if data_info is None:
            data_info = {}

        return Result(
            error,
            ToolBoxResult(data_info=data_info, data=data, data_to=data_to),
            ToolBoxInfo(exec_code=exec_code, help_text=help_text)
        )

    def print(self, message, end="\n", **kwargs):
        """Print a message prefixed with the tool's colored name; no-op when
        ``self.stuf`` is set."""
        if self.stuf:
            return

        self.app.print(Style.style_dic[self.color] + self.name + Style.style_dic["END"] + ":", message, end=end,
                       **kwargs)

    def add_str_to_config(self, command):
        """Store a ``[key, value]`` pair in the tool config; rejects any
        other shape."""
        if len(command) != 2:
            self.logger.error('Invalid command must be key value')
            return False
        self.config[command[0]] = command[1]

    def webInstall(self, user_instance, construct_render) -> str:
        """Return a web installer for the given user instance and construct render template."""

    def get_version(self) -> str:
        """Return the tool's version string."""
        return self.version

    async def get_user(self, username: str) -> Result:
        """Fetch a user record by name via the auth manager module."""
        return await self.app.a_run_any(CLOUDM_AUTHMANAGER.GET_USER_BY_NAME, username=username, get_results=True)

    async def __initobj(self):
        """Crutch used for __await__ after spawning"""
        assert not self.async_initialized
        self.async_initialized = True
        # pass the parameters to __ainit__ that passed to __init__
        await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
        return self

    def __await__(self):
        # Allows `await ModClass(...)` to run the async init exactly once.
        return self.__initobj().__await__()
__init__(*args, **kwargs)

Standard constructor used for argument passing. Do not override; use `__ainit__` instead.

Source code in toolboxv2/utils/system/main_tool.py
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
def __init__(self, *args, **kwargs):
    """Standard constructor used for argument passing.

    Do not override; use ``__ainit__`` instead. Stores the call arguments so
    the real async initialization can replay them when the instance is
    awaited. Assumes ``self.name`` and ``self.app`` are already set by the
    subclass/framework — TODO confirm.
    """
    # Stash args for __ainit__ (replayed via __await__/__initobj).
    self.__storedargs = args, kwargs
    self.tools = kwargs.get("tool", {})
    self.logger = kwargs.get("logs", get_logger())
    self.color = kwargs.get("color", "WHITE")
    # Synchronous "load"/"on_start" hook; coroutine hooks are deferred.
    self.todo = kwargs.get("load", kwargs.get("on_start", lambda: None))
    if "on_exit" in kwargs and isinstance(kwargs.get("on_exit"), Callable):
        # Register the on_exit hook as a toolbox function on this module.
        self.on_exit =self.app.tb(
            mod_name=self.name,
            name=kwargs.get("on_exit").__name__,
            version=self.version if hasattr(self, 'version') else "0.0.0",
        )(kwargs.get("on_exit"))
    self.async_initialized = False
    if self.todo:
        try:
            if inspect.iscoroutinefunction(self.todo):
                # Coroutine hooks are not run here; presumably executed
                # during async init — TODO confirm.
                pass
            else:
                self.todo()
            get_logger().info(f"{self.name} on load suspended")
        except Exception as e:
            get_logger().error(f"Error loading mod {self.name} {e}")
            if self.app.debug:
                import traceback
                traceback.print_exc()
    else:
        get_logger().info(f"{self.name} no load require")
__initobj() async

Crutch used for await after spawning

Source code in toolboxv2/utils/system/main_tool.py
174
175
176
177
178
179
180
async def __initobj(self):
    """Crutch used for __await__ after spawning: run async init exactly once."""
    # NOTE(review): `assert` is stripped under `python -O`, removing the
    # double-initialization guard there.
    assert not self.async_initialized
    self.async_initialized = True
    # pass the parameters to __ainit__ that passed to __init__
    await self.__ainit__(*self.__storedargs[0], **self.__storedargs[1])
    return self
get_version()

Returns the version.

Source code in toolboxv2/utils/system/main_tool.py
167
168
169
def get_version(self) -> str:
    """Return this tool's version string (``self.version``)."""
    return self.version
webInstall(user_instance, construct_render)

Returns a web installer for the given user instance and construct render template.

Source code in toolboxv2/utils/system/main_tool.py
164
165
def webInstall(self, user_instance, construct_render) -> str:
    """Return a web installer page for the given user instance and render template.

    NOTE(review): stub — no body, so it actually returns None despite the
    ``-> str`` annotation. TODO confirm intended contract.
    """
get_version_from_pyproject(pyproject_path='../pyproject.toml')

Reads the version from the pyproject.toml file.

Source code in toolboxv2/utils/system/main_tool.py
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
def get_version_from_pyproject(pyproject_path='../pyproject.toml'):
    """Read the project version from a pyproject.toml file.

    Falls back from ``../pyproject.toml`` to ``./pyproject.toml`` when the
    default path does not exist. Prefers the stdlib ``tomllib`` parser
    (Python 3.11+) and only falls back to the third-party ``toml`` package
    on older interpreters, removing a hard dependency.

    Args:
        pyproject_path: Path to the pyproject.toml file.

    Returns:
        str: The version string; "0.1.21" when neither default location
        exists; "0.0.0" on any read/parse error.
    """
    # Default-path fallback chain: ../pyproject.toml -> ./pyproject.toml.
    if not os.path.exists(pyproject_path) and pyproject_path=='../pyproject.toml':
        pyproject_path = 'pyproject.toml'
    if not os.path.exists(pyproject_path) and pyproject_path=='pyproject.toml':
        return "0.1.21"

    try:
        try:
            import tomllib  # stdlib since Python 3.11
            with open(pyproject_path, 'rb') as file:  # tomllib requires binary mode
                pyproject_data = tomllib.load(file)
        except ImportError:
            import toml  # third-party fallback for Python < 3.11
            with open(pyproject_path) as file:
                pyproject_data = toml.load(file)

        # Extract the version from the 'project' section
        version = pyproject_data.get('project', {}).get('version')

        if version is None:
            raise ValueError(f"Version not found in {pyproject_path}")

        return version
    except Exception as e:
        print(f"Error reading version: {e}")
        return "0.0.0"
state_system

The tasks of the state system are: 1. keep track of the current state of the ToolBox and its dependencies; 2. track the shasum of all mods and runnables; 3. track the version of all mods.

The state : {"utils":{"file_name": {"version":##,"shasum"}} ,"mods":{"file_name": {"version":##,"shasum":##,"src-url":##}} ,"runnable":{"file_name": {"version":##,"shasum":##,"src-url":##}} ,"api":{"file_name": {"version":##,"shasum"}} ,"app":{"file_name": {"version":##,"shasum":##,"src-url":##}} }

Transforms state from one form to another.

tcm_p2p_cli
InstanceManager

Manages a single named instance (relay or peer) of the P2P application.

Source code in toolboxv2/utils/system/tcm_p2p_cli.py
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
class InstanceManager:
    """Manages a single named instance (relay or peer) of the P2P application.

    All per-instance files (state, config, log) live under
    ``INSTANCES_ROOT_DIR / name``.
    """

    def __init__(self, name: str):
        # Lay out the on-disk file locations; nothing is created yet.
        self.name = name
        self.instance_dir = INSTANCES_ROOT_DIR / self.name
        self.state_file = self.instance_dir / "state.json"
        self.config_file = self.instance_dir / "config.toml"
        self.log_file = self.instance_dir / "instance.log"

    def read_state(self) -> dict:
        """Reads the instance's state (pid, mode, etc.) from its state file."""
        if not self.state_file.exists():
            return {}
        try:
            with open(self.state_file) as f:
                return json.load(f)
        except (json.JSONDecodeError, FileNotFoundError):
            # Corrupt or concurrently-removed state is treated as "no state".
            return {}

    def write_state(self, state_data: dict):
        """Writes the instance's state to its state file."""
        self.instance_dir.mkdir(parents=True, exist_ok=True)
        with open(self.state_file, 'w') as f:
            json.dump(state_data, f, indent=2)

    def is_running(self) -> bool:
        """Checks if the process associated with this instance is active."""
        # NOTE(review): a recycled PID would count as "running" — TODO confirm
        # this is acceptable for the CLI's needs.
        pid = self.read_state().get('pid')
        return psutil.pid_exists(pid) if pid else False

    def generate_config(self, mode: str, config_data: dict):
        """Generates the config.toml file for this specific instance."""
        # NOTE(review): values are interpolated unescaped — a double quote in
        # e.g. a password would produce invalid TOML.
        content = f'mode = "{mode}"\n\n'

        if mode == "relay":
            content += "[relay]\n"
            content += f'bind_address = "{config_data.get("bind_address", "0.0.0.0:9000")}"\n'
            content += f'password = "{config_data.get("password", "")}"\n'

        elif mode == "peer":
            content += "[peer]\n"
            content += f'relay_address = "{config_data.get("relay_address", "127.0.0.1:9000")}"\n'
            content += f'relay_password = "{config_data.get("relay_password", "")}"\n'
            content += f'peer_id = "{config_data.get("peer_id", "default-peer")}"\n'
            content += f'listen_address = "{config_data.get("listen_address", "127.0.0.1:8000")}"\n'
            content += f'forward_to_address = "{config_data.get("forward_to_address", "127.0.0.1:3000")}"\n'
            if config_data.get("target_peer_id"):
                content += f'target_peer_id = "{config_data.get("target_peer_id")}"\n'

        self.instance_dir.mkdir(parents=True, exist_ok=True)
        with open(self.config_file, "w") as f:
            f.write(content)
        print(f"    {Style.GREEN('Generated config:')} {Style.GREY(str(self.config_file))}")

    def start(self, executable_path: Path, mode: str, config_data: dict) -> bool:
        """Starts the instance process, detaches it, and logs its state."""
        if self.is_running():
            print(Style.YELLOW(f"Instance '{self.name}' is already running."))
            return True

        print(Style.CYAN(f"🚀 Starting instance '{self.name}'..."))
        self.generate_config(mode, config_data)
        # NOTE(review): the handle stays open on success so the detached child
        # keeps a valid log target; the parent's copy is never closed.
        log_handle = open(self.log_file, 'a')

        try:
            with Spinner(f"Launching process for '{self.name}'", symbols="d"):
                process = subprocess.Popen(
                    [str(executable_path)],
                    cwd=str(self.instance_dir),
                    stdout=log_handle,
                    stderr=log_handle,
                    # Detach from the console on Windows; POSIX needs no flag.
                    creationflags=subprocess.DETACHED_PROCESS if platform.system() == "Windows" else 0
                )
                time.sleep(1.5)  # Give it a moment to stabilize or crash

            # poll() returning a code means the child already exited => failure.
            if process.poll() is not None:
                print(f"\n{Style.RED2('❌ ERROR:')} Instance '{self.name}' failed to start. Check logs for details:")
                print(f"    {Style.GREY(self.log_file)}")
                return False

            # Persist pid/mode/config so stop()/is_running() can find it later.
            state = {'pid': process.pid, 'mode': mode, 'config': config_data}
            self.write_state(state)
            print(
                f"\n{Style.GREEN2('✅ Instance')} '{Style.Bold(self.name)}' {Style.GREEN2('started successfully.')} {Style.GREY(f'(PID: {process.pid})')}")
            print(f"   {Style.BLUE('Logging to:')} {Style.GREY(self.log_file)}")
            return True
        except Exception as e:
            print(f"\n{Style.RED2('❌ ERROR:')} Failed to launch instance '{self.name}': {e}")
            log_handle.close()
            return False

    def stop(self, timeout: int = 10) -> bool:
        """Stops the instance process gracefully with a forced kill fallback."""
        if not self.is_running():
            print(Style.YELLOW(f"Instance '{self.name}' is not running."))
            # Clear any stale state left behind by a crashed process.
            self.write_state({})
            return True

        pid = self.read_state().get('pid')
        with Spinner(f"Stopping '{self.name}' (PID: {pid})", symbols="+", time_in_s=timeout, count_down=True) as s:
            try:
                proc = psutil.Process(pid)
                proc.terminate()  # SIGTERM first: allow graceful shutdown
                proc.wait(timeout)
            except psutil.TimeoutExpired:
                # Graceful stop timed out — escalate to SIGKILL.
                s.message = f"Force killing '{self.name}'"
                proc.kill()
            except psutil.NoSuchProcess:
                pass
            except Exception as e:
                print(f"\n{Style.RED2('❌ ERROR:')} Failed to stop instance '{self.name}': {e}")
                return False

        self.write_state({})
        print(f"\n{Style.VIOLET2('⏹️  Instance')} '{Style.Bold(self.name)}' {Style.VIOLET2('stopped.')}")
        return True
generate_config(mode, config_data)

Generates the config.toml file for this specific instance.

Source code in toolboxv2/utils/system/tcm_p2p_cli.py
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
def generate_config(self, mode: str, config_data: dict):
    """Generates the config.toml file for this specific instance.

    Writes a ``[relay]`` or ``[peer]`` section depending on *mode*, filling
    unset keys with local-development defaults.
    """
    # NOTE(review): values are interpolated unescaped — a double quote in
    # e.g. a password would produce invalid TOML.
    content = f'mode = "{mode}"\n\n'

    if mode == "relay":
        content += "[relay]\n"
        content += f'bind_address = "{config_data.get("bind_address", "0.0.0.0:9000")}"\n'
        content += f'password = "{config_data.get("password", "")}"\n'

    elif mode == "peer":
        content += "[peer]\n"
        content += f'relay_address = "{config_data.get("relay_address", "127.0.0.1:9000")}"\n'
        content += f'relay_password = "{config_data.get("relay_password", "")}"\n'
        content += f'peer_id = "{config_data.get("peer_id", "default-peer")}"\n'
        content += f'listen_address = "{config_data.get("listen_address", "127.0.0.1:8000")}"\n'
        content += f'forward_to_address = "{config_data.get("forward_to_address", "127.0.0.1:3000")}"\n'
        # target_peer_id is optional; only emit it when provided.
        if config_data.get("target_peer_id"):
            content += f'target_peer_id = "{config_data.get("target_peer_id")}"\n'

    self.instance_dir.mkdir(parents=True, exist_ok=True)
    with open(self.config_file, "w") as f:
        f.write(content)
    print(f"    {Style.GREEN('Generated config:')} {Style.GREY(str(self.config_file))}")
is_running()

Checks if the process associated with this instance is active.

Source code in toolboxv2/utils/system/tcm_p2p_cli.py
 99
100
101
102
def is_running(self) -> bool:
    """Report whether the PID recorded in the state file is currently alive."""
    pid = self.read_state().get('pid')
    if not pid:
        # No recorded PID (or pid 0) means the instance was never started.
        return False
    return psutil.pid_exists(pid)
read_state()

Reads the instance's state (pid, mode, etc.) from its state file.

Source code in toolboxv2/utils/system/tcm_p2p_cli.py
83
84
85
86
87
88
89
90
91
def read_state(self) -> dict:
    """Return the instance's persisted state dict; {} when missing or corrupt."""
    if not self.state_file.exists():
        return {}
    try:
        return json.loads(self.state_file.read_text())
    except (json.JSONDecodeError, FileNotFoundError):
        # Corrupt or concurrently-removed state is treated as "no state".
        return {}
start(executable_path, mode, config_data)

Starts the instance process, detaches it, and logs its state.

Source code in toolboxv2/utils/system/tcm_p2p_cli.py
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
def start(self, executable_path: Path, mode: str, config_data: dict) -> bool:
    """Starts the instance process, detaches it, and logs its state.

    Regenerates the config, launches the executable with stdout/stderr
    redirected to the instance log, waits briefly to catch immediate
    crashes, then persists pid/mode/config. Returns True on success (or if
    already running), False on failure.
    """
    if self.is_running():
        print(Style.YELLOW(f"Instance '{self.name}' is already running."))
        return True

    print(Style.CYAN(f"🚀 Starting instance '{self.name}'..."))
    self.generate_config(mode, config_data)
    # NOTE(review): the handle stays open on success so the detached child
    # keeps a valid log target; the parent's copy is never closed.
    log_handle = open(self.log_file, 'a')

    try:
        with Spinner(f"Launching process for '{self.name}'", symbols="d"):
            process = subprocess.Popen(
                [str(executable_path)],
                cwd=str(self.instance_dir),
                stdout=log_handle,
                stderr=log_handle,
                # Detach from the console on Windows; POSIX needs no flag.
                creationflags=subprocess.DETACHED_PROCESS if platform.system() == "Windows" else 0
            )
            time.sleep(1.5)  # Give it a moment to stabilize or crash

        # poll() returning a code means the child already exited => failure.
        if process.poll() is not None:
            print(f"\n{Style.RED2('❌ ERROR:')} Instance '{self.name}' failed to start. Check logs for details:")
            print(f"    {Style.GREY(self.log_file)}")
            return False

        # Persist pid/mode/config so stop()/is_running() can find it later.
        state = {'pid': process.pid, 'mode': mode, 'config': config_data}
        self.write_state(state)
        print(
            f"\n{Style.GREEN2('✅ Instance')} '{Style.Bold(self.name)}' {Style.GREEN2('started successfully.')} {Style.GREY(f'(PID: {process.pid})')}")
        print(f"   {Style.BLUE('Logging to:')} {Style.GREY(self.log_file)}")
        return True
    except Exception as e:
        print(f"\n{Style.RED2('❌ ERROR:')} Failed to launch instance '{self.name}': {e}")
        log_handle.close()
        return False
stop(timeout=10)

Stops the instance process gracefully with a forced kill fallback.

Source code in toolboxv2/utils/system/tcm_p2p_cli.py
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
def stop(self, timeout: int = 10) -> bool:
    """Stops the instance process gracefully with a forced kill fallback.

    Sends SIGTERM and waits up to *timeout* seconds; escalates to SIGKILL
    on timeout. Clears the persisted state afterwards. Returns True unless
    an unexpected error occurred while stopping.
    """
    if not self.is_running():
        print(Style.YELLOW(f"Instance '{self.name}' is not running."))
        # Clear any stale state left behind by a crashed process.
        self.write_state({})
        return True

    pid = self.read_state().get('pid')
    with Spinner(f"Stopping '{self.name}' (PID: {pid})", symbols="+", time_in_s=timeout, count_down=True) as s:
        try:
            proc = psutil.Process(pid)
            proc.terminate()  # SIGTERM first: allow graceful shutdown
            proc.wait(timeout)
        except psutil.TimeoutExpired:
            # Graceful stop timed out — escalate to SIGKILL.
            s.message = f"Force killing '{self.name}'"
            proc.kill()
        except psutil.NoSuchProcess:
            pass
        except Exception as e:
            print(f"\n{Style.RED2('❌ ERROR:')} Failed to stop instance '{self.name}': {e}")
            return False

    self.write_state({})
    print(f"\n{Style.VIOLET2('⏹️  Instance')} '{Style.Bold(self.name)}' {Style.VIOLET2('stopped.')}")
    return True
write_state(state_data)

Writes the instance's state to its state file.

Source code in toolboxv2/utils/system/tcm_p2p_cli.py
93
94
95
96
97
def write_state(self, state_data: dict):
    """Persist *state_data* as pretty-printed JSON, creating directories as needed."""
    self.instance_dir.mkdir(parents=True, exist_ok=True)
    self.state_file.write_text(json.dumps(state_data, indent=2))
find_instances()

Discovers all managed instances by scanning the instances directory.

Source code in toolboxv2/utils/system/tcm_p2p_cli.py
59
60
61
62
63
64
65
66
67
68
def find_instances() -> list['InstanceManager']:
    """Discover all managed instances by scanning the instances directory.

    Every subdirectory of ``INSTANCES_ROOT_DIR`` is treated as one instance.
    """
    if not INSTANCES_ROOT_DIR.is_dir():
        return []
    return [
        InstanceManager(entry.name)
        for entry in INSTANCES_ROOT_DIR.iterdir()
        if entry.is_dir()
    ]
get_executable_path(update=False)

Finds the release executable in standard locations.

Source code in toolboxv2/utils/system/tcm_p2p_cli.py
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
def get_executable_path(update=False) -> Path | None:
    """Find the release executable in standard locations.

    Searches the dedicated ``bin`` folder first, then cargo's release target
    directory; with ``update=True`` the order is reversed so a freshly built
    binary wins.

    Returns:
        Path | None: Resolved path of the first existing executable, or
        None when none is found.

    Fix: removed a leftover debug ``print(path)`` that polluted stdout on
    every lookup.
    """
    # Look in a dedicated 'bin' folder first, then cargo's default
    from toolboxv2 import tb_root_dir
    search_paths = [
        tb_root_dir / "bin" / EXECUTABLE_NAME,
        tb_root_dir / "tcm" / "target" / "release" / EXECUTABLE_NAME,
    ]
    if update:
        search_paths = search_paths[::-1]
    for path in search_paths:
        if path.is_file():
            return path.resolve()
    return None
types
AppType
Source code in toolboxv2/utils/system/types.py
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
class AppType:
    prefix: str
    id: str
    globals: dict[str, Any] = {"root": dict, }
    locals: dict[str, Any] = {"user": {'app': "self"}, }

    local_test: bool = False
    start_dir: str
    data_dir: str
    config_dir: str
    info_dir: str
    is_server:bool = False

    logger: logging.Logger
    logging_filename: str

    api_allowed_mods_list: list[str] = []

    version: str
    loop: asyncio.AbstractEventLoop

    keys: dict[str, str] = {
        "MACRO": "macro~~~~:",
        "MACRO_C": "m_color~~:",
        "HELPER": "helper~~~:",
        "debug": "debug~~~~:",
        "id": "name-spa~:",
        "st-load": "mute~load:",
        "comm-his": "comm-his~:",
        "develop-mode": "dev~mode~:",
        "provider::": "provider::",
    }

    defaults: dict[str, (bool or dict or dict[str, dict[str, str]] or str or list[str] or list[list]) | None] = {
        "MACRO": list[str],
        "MACRO_C": dict,
        "HELPER": dict,
        "debug": str,
        "id": str,
        "st-load": False,
        "comm-his": list[list],
        "develop-mode": bool,
    }

    cluster_manager: ClusterManager
    root_blob_storage: BlobStorage
    config_fh: FileHandler
    _debug: bool
    flows: dict[str, Callable]
    dev_modi: bool
    functions: dict[str, Any]
    modules: dict[str, Any]

    interface_type: ToolBoxInterfaces
    REFIX: str

    alive: bool
    called_exit: tuple[bool, float]
    args_sto: AppArgs
    system_flag = None
    session = None
    appdata = None
    exit_tasks = []

    enable_profiling: bool = False
    sto = None

    websocket_handlers: dict[str, dict[str, Callable]] = {}
    _rust_ws_bridge: Any = None

    def __init__(self, prefix: None | str= None, args: AppArgs | None = None):
        # Minimal type-stub constructor: store CLI args and instance prefix;
        # the real initialization lives in the concrete App implementation.
        self.args_sto = args
        self.prefix = prefix
        """proxi attr"""

    def start_server(self):
        """Start the backing API server, at most once per app instance.

        ``self.is_server`` acts as a run-once guard so repeated calls
        become no-ops.
        """
        from toolboxv2.utils.system.api import manage_server
        if self.is_server:
            return
        manage_server("start")
        # Fix: the original assigned False here, which defeated the guard
        # above and allowed duplicate server starts.
        self.is_server = True

    @staticmethod
    def exit_main(*args, **kwargs):
        """proxi attr"""

    @staticmethod
    async def hide_console(*args, **kwargs):
        """proxi attr"""

    @staticmethod
    async def show_console(*args, **kwargs):
        """proxi attr"""

    @staticmethod
    async def disconnect(*args, **kwargs):
        """proxi attr"""

    def set_logger(self, debug=False):
        """proxi attr"""

    @property
    def debug(self):
        """proxi attr"""
        return self._debug

    def debug_rains(self, e):
        """proxi attr"""

    def set_flows(self, r):
        """proxi attr"""

    def run_flows(self, name, **kwargs):
        """proxi attr"""

    def rrun_flows(self, name, **kwargs):
        """proxi attr"""

    def idle(self):
        """Block the calling thread until ``self.alive`` turns False.

        Polls once per second; a KeyboardInterrupt ends the wait quietly
        instead of propagating.
        """
        import time
        self.print("idle")
        try:
            while self.alive:
                time.sleep(1)
        except KeyboardInterrupt:
            pass
        self.print("idle done")

    async def a_idle(self):
        """Async idle loop: serve the daemon app when present, else sleep-poll.

        A KeyboardInterrupt ends the wait quietly instead of propagating.
        """
        self.print("a idle")
        try:
            if hasattr(self, 'daemon_app'):
                # Daemon mode: hand control to the daemon connection.
                self.print("serving daemon")
                await self.daemon_app.connect(self)
            else:
                # Default mode: poll the alive flag once per second.
                self.print("serving default")
                while self.alive:
                    await asyncio.sleep(1)
        except KeyboardInterrupt:
            pass
        self.print("a idle done")

    @debug.setter
    def debug(self, value):
        """proxi attr"""

    def _coppy_mod(self, content, new_mod_dir, mod_name, file_type='py'):
        """proxi attr"""

    def _pre_lib_mod(self, mod_name, path_to="./runtime", file_type='py'):
        """proxi attr"""

    def _copy_load(self, mod_name, file_type='py', **kwargs):
        """proxi attr"""

    def inplace_load_instance(self, mod_name, loc="toolboxv2.mods.", spec='app', save=True):
        """proxi attr"""

    def save_instance(self, instance, modular_id, spec='app', instance_type="file/application", tools_class=None):
        """proxi attr"""

    def save_initialized_module(self, tools_class, spec):
        """proxi attr"""

    def mod_online(self, mod_name, installed=False):
        """proxi attr"""

    def _get_function(self,
                      name: Enum or None,
                      state: bool = True,
                      specification: str = "app",
                      metadata=False, as_str: tuple or None = None, r=0):
        """proxi attr"""

    def save_exit(self):
        """proxi attr"""

    def load_mod(self, mod_name: str, mlm='I', **kwargs):
        """proxi attr"""

    async def init_module(self, modular):
        """Load (and thereby initialize) the given module; thin alias for ``load_mod``."""
        return await self.load_mod(modular)

    async def load_external_mods(self):
        """proxi attr"""

    async def load_all_mods_in_file(self, working_dir="mods"):
        """proxi attr"""

    def get_all_mods(self, working_dir="mods", path_to="./runtime"):
        """proxi attr"""

    def remove_all_modules(self, delete=False):
        """Close and remove every loaded module, optionally deleting it.

        Iterates a snapshot of the registry since ``remove_mod`` mutates it.
        """
        for module_name in list(self.functions):
            self.logger.info(f"closing: {module_name}")
            self.remove_mod(module_name, delete=delete)

    async def a_remove_all_modules(self, delete=False):
        """Asynchronously close and remove every loaded module.

        Iterates a snapshot of the registry since ``a_remove_mod`` mutates it.
        """
        for module_name in list(self.functions):
            self.logger.info(f"closing: {module_name}")
            await self.a_remove_mod(module_name, delete=delete)

    def print_ok(self):
        """proxi attr"""
        self.logger.info("OK")

    def reload_mod(self, mod_name, spec='app', is_file=True, loc="toolboxv2.mods."):
        """proxi attr"""

    def watch_mod(self, mod_name, spec='app', loc="toolboxv2.mods.", use_thread=True, path_name=None):
        """proxi attr"""

    def remove_mod(self, mod_name, spec='app', delete=True):
        """proxi attr"""

    async def a_remove_mod(self, mod_name, spec='app', delete=True):
        """proxi attr"""

    def exit(self):
        """proxi attr"""

    def web_context(self) -> str:
        """returns the build index ( toolbox web component )"""

    async def a_exit(self):
        """proxi attr"""

    def save_load(self, modname, spec='app'):
        """proxi attr"""

    def get_function(self, name: Enum or tuple, **kwargs):
        """
        Kwargs for _get_function
            metadata:: return the registered function dictionary
                stateless: (function_data, None), 0
                stateful: (function_data, higher_order_function), 0
            state::boolean
                specification::str default app
        """

    def run_a_from_sync(self, function, *args):
        """
        run a async fuction
        """

    def run_bg_task_advanced(self, task, *args, **kwargs):
        """
        proxi attr
        """

    def wait_for_bg_tasks(self, timeout=None):
        """
        proxi attr
        """

    def run_bg_task(self, task):
        """
                run a async fuction
                """
    def run_function(self, mod_function_name: Enum or tuple,
                     tb_run_function_with_state=True,
                     tb_run_with_specification='app',
                     args_=None,
                     kwargs_=None,
                     *args,
                     **kwargs) -> Result:

        """proxi attr"""

    async def a_run_function(self, mod_function_name: Enum or tuple,
                             tb_run_function_with_state=True,
                             tb_run_with_specification='app',
                             args_=None,
                             kwargs_=None,
                             *args,
                             **kwargs) -> Result:

        """proxi attr"""

    def fuction_runner(self, function, function_data: dict, args: list, kwargs: dict, t0=.0):
        """
        parameters = function_data.get('params')
        modular_name = function_data.get('module_name')
        function_name = function_data.get('func_name')
        mod_function_name = f"{modular_name}.{function_name}"

        proxi attr
        """

    async def a_fuction_runner(self, function, function_data: dict, args: list, kwargs: dict):
        """
        parameters = function_data.get('params')
        modular_name = function_data.get('module_name')
        function_name = function_data.get('func_name')
        mod_function_name = f"{modular_name}.{function_name}"

        proxi attr
        """

    async def run_http(self, mod_function_name: Enum or str or tuple, function_name=None, method="GET",
                       args_=None,
                       kwargs_=None,
                       *args, **kwargs):
        """run a function remote via http / https"""

    def run_any(self, mod_function_name: Enum or str or tuple, backwords_compability_variabel_string_holder=None,
                get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                kwargs_=None,
                *args, **kwargs):
        """proxi attr"""

    async def a_run_any(self, mod_function_name: Enum or str or tuple,
                        backwords_compability_variabel_string_holder=None,
                        get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                        kwargs_=None,
                        *args, **kwargs):
        """proxi attr"""

    def get_mod(self, name, spec='app') -> ModuleType or MainToolType:
        """proxi attr"""

    @staticmethod
    def print(text, *args, **kwargs):
        """proxi attr"""

    @staticmethod
    def sprint(text, *args, **kwargs):
        """proxi attr"""

    # ----------------------------------------------------------------
    # Decorators for the toolbox

    def _register_function(self, module_name, func_name, data):
        """proxi attr"""

    def _create_decorator(self, type_: str,
                          name: str = "",
                          mod_name: str = "",
                          level: int = -1,
                          restrict_in_virtual_mode: bool = False,
                          api: bool = False,
                          helper: str = "",
                          version: str or None = None,
                          initial=False,
                          exit_f=False,
                          test=True,
                          samples=None,
                          state=None,
                          pre_compute=None,
                          post_compute=None,
                          memory_cache=False,
                          file_cache=False,
                          row=False,
                          request_as_kwarg=False,
                          memory_cache_max_size=100,
                          memory_cache_ttl=300,
                          websocket_handler: str | None = None,):
        """Proxy attribute: build the actual decorator that registers a function
        with the given metadata (used by :meth:`tb`).

        Implemented by the concrete App class; the commented dict below sketches
        the registry entry the real implementation passes to
        ``_register_function``.
        """

        # data = {
        #     "type": type_,
        #     "module_name": module_name,
        #     "func_name": func_name,
        #     "level": level,
        #     "restrict_in_virtual_mode": restrict_in_virtual_mode,
        #     "func": func,
        #     "api": api,
        #     "helper": helper,
        #     "version": version,
        #     "initial": initial,
        #     "exit_f": exit_f,
        #     "__module__": func.__module__,
        #     "signature": sig,
        #     "params": params,
        #     "state": (
        #         False if len(params) == 0 else params[0] in ['self', 'state', 'app']) if state is None else state,
        #     "do_test": test,
        #     "samples": samples,
        #     "request_as_kwarg": request_as_kwarg,

    def tb(self, name=None,
           mod_name: str = "",
           helper: str = "",
           version: str or None = None,
           test: bool = True,
           restrict_in_virtual_mode: bool = False,
           api: bool = False,
           initial: bool = False,
           exit_f: bool = False,
           test_only: bool = False,
           memory_cache: bool = False,
           file_cache: bool = False,
           row=False,
           request_as_kwarg: bool = False,
           state: bool or None = None,
           level: int = 0,
           memory_cache_max_size: int = 100,
           memory_cache_ttl: int = 300,
           samples: list or dict or None = None,
           interface: ToolBoxInterfaces or None or str = None,
           pre_compute=None,
           post_compute=None,
           api_methods=None,
           websocket_handler: str | None = None,
           ):
        """
    A decorator for registering and configuring functions within a module.

    This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

    Args:
        name (str, optional): The name to register the function under. Defaults to the function's own name.
        mod_name (str, optional): The name of the module the function belongs to.
        helper (str, optional): A helper string providing additional information about the function.
        version (str or None, optional): The version of the function or module.
        test (bool, optional): Flag to indicate if the function is for testing purposes.
        restrict_in_virtual_mode (bool, optional): Flag to restrict the function in virtual mode.
        api (bool, optional): Flag to indicate if the function is part of an API.
        initial (bool, optional): Flag to indicate if the function should be executed at initialization.
        exit_f (bool, optional): Flag to indicate if the function should be executed at exit.
        test_only (bool, optional): Flag to indicate if the function should only be used for testing.
        memory_cache (bool, optional): Flag to enable memory caching for the function.
        request_as_kwarg (bool, optional): Flag to pass the request as a kwarg when the function is called from the API.
        file_cache (bool, optional): Flag to enable file caching for the function.
        row (bool, optional): Whether to return raw data; default False wraps the result in a Result type.
        state (bool or None, optional): Flag to indicate if the function maintains state.
        level (int, optional): The level of the function, used for prioritization or categorization.
        memory_cache_max_size (int, optional): Maximum size of the memory cache.
        memory_cache_ttl (int, optional): Time-to-live for the memory cache entries.
        samples (list or dict or None, optional): Samples or examples of function usage.
        interface (str, optional): The interface type for the function.
        pre_compute (callable, optional): A function to be called before the main function.
        post_compute (callable, optional): A function to be called after the main function.
        api_methods (list[str], optional): default ["AUTO"] (GET if not params, POST if params), GET, POST, PUT or DELETE.
        websocket_handler (str or None, optional): Name of the websocket handler to attach to the function.

    Returns:
        function: The decorated function with additional processing and registration capabilities.
    """
        if interface is None:
            interface = "tb"
        # In non-test app instances, test-only functions become a no-op
        # decorator that simply returns its positional arguments.
        if test_only and 'test' not in self.id:
            return lambda *args, **kwargs: args
        # NOTE(review): api_methods is accepted but _create_decorator has no
        # matching parameter — TODO wire it through or document why it is unused.
        return self._create_decorator(interface,
                                      name,
                                      mod_name,
                                      level=level,
                                      restrict_in_virtual_mode=restrict_in_virtual_mode,
                                      helper=helper,
                                      api=api,
                                      version=version,
                                      initial=initial,
                                      exit_f=exit_f,
                                      test=test,
                                      samples=samples,
                                      state=state,
                                      pre_compute=pre_compute,
                                      post_compute=post_compute,
                                      memory_cache=memory_cache,
                                      file_cache=file_cache,
                                      row=row,
                                      request_as_kwarg=request_as_kwarg,
                                      memory_cache_max_size=memory_cache_max_size,
                                      memory_cache_ttl=memory_cache_ttl,
                                      # Bug fix: websocket_handler was accepted but silently
                                      # dropped; _create_decorator declares this parameter.
                                      websocket_handler=websocket_handler)

    def print_functions(self, name=None):
        """Print a human-readable summary of the registered functions.

        Args:
            name (str, optional): If given, only print the functions of that
                module; otherwise print every registered module.
        """
        if not self.functions:
            print("Nothing to see")
            return

        def helper(_functions):
            for func_name, data in _functions.items():
                if not isinstance(data, dict):
                    continue

                func_type = data.get('type', 'Unknown')
                # Level -1 is rendered as 'r' (reserved/root level).
                # Bug fix: use .get like the other fields so entries without
                # a 'level' key no longer raise KeyError.
                level = data.get('level', -1)
                func_level = 'r' if level == -1 else level
                api_status = 'Api' if data.get('api', False) else 'Non-Api'

                print(f"  Function: {func_name}{data.get('signature', '()')}; "
                      f"Type: {func_type}, Level: {func_level}, {api_status}")

        if name is not None:
            functions = self.functions.get(name)
            if functions is None:
                # Bug fix: an unknown module name previously fell through and
                # printed every module; report the miss instead.
                print(f"Module: {name} not found")
                return
            print(f"\nModule: {name}; Type: {functions.get('app_instance_type', 'Unknown')}")
            helper(functions)
            return
        for module, functions in self.functions.items():
            print(f"\nModule: {module}; Type: {functions.get('app_instance_type', 'Unknown')}")
            helper(functions)

    def save_autocompletion_dict(self):
        """Proxy attribute: persist the autocompletion dictionary.
        Implemented by the concrete App class."""

    def get_autocompletion_dict(self):
        """Proxy attribute: return the autocompletion dictionary.
        Implemented by the concrete App class."""

    def get_username(self, get_input=False, default="loot") -> str:
        """Proxy attribute: return the current username, optionally prompting
        for input, falling back to *default*. Implemented by the concrete App class."""

    def save_registry_as_enums(self, directory: str, filename: str):
        """Proxy attribute: write the function registry out as enum definitions
        to ``directory/filename``. Implemented by the concrete App class."""

    async def execute_all_functions_(self, m_query='', f_query=''):
        """Sequentially run every registered function matching the prefix
        filters and collect success/error/coverage statistics.

        Args:
            m_query (str): Module-name prefix filter ('' matches all modules).
            f_query (str): Function-name prefix filter ('' matches all functions).

        Returns:
            Result: ok-result whose data is the statistics dict and whose
            data_info is the output of ``analyze_data``.
        """
        print("Executing all functions")
        from ..extras import generate_test_cases
        all_data = {
            "modular_run": 0,
            "modular_fatal_error": 0,
            "errors": 0,
            "modular_sug": 0,
            "coverage": [],
            "total_coverage": {},
        }
        # Snapshot: a_run_function may mutate self.functions while we iterate.
        items = list(self.functions.items()).copy()
        for module_name, functions in items:
            infos = {
                "functions_run": 0,
                "functions_fatal_error": 0,
                "error": 0,
                "functions_sug": 0,
                'calls': {},
                'callse': {},  # NOTE(review): presumably "calls with errors"
                "coverage": [0, 0],  # [testable functions seen, functions exercised]
            }
            all_data['modular_run'] += 1
            if not module_name.startswith(m_query):
                all_data['modular_sug'] += 1
                continue

            with Spinner(message=f"In {module_name}| "):
                f_items = list(functions.items()).copy()
                for function_name, function_data in f_items:
                    if not isinstance(function_data, dict):
                        continue
                    if not function_name.startswith(f_query):
                        continue
                    test = function_data.get('do_test')
                    infos["coverage"][0] += 1
                    if test is False:
                        continue

                    with Spinner(message=f"\t\t\t\t\t\tfuction {function_name}..."):
                        params = function_data.get('params')
                        sig = function_data.get('signature')
                        state = function_data.get('state')
                        samples = function_data.get('samples')

                        test_kwargs_list = [{}]

                        if params is not None:
                            # Prefer author-provided samples; otherwise derive
                            # test cases from the signature.
                            test_kwargs_list = samples if samples is not None else generate_test_cases(sig=sig)
                        infos["coverage"][1] += 1
                        for test_kwargs in test_kwargs_list:
                            try:
                                result = await self.a_run_function((module_name, function_name),
                                                                   tb_run_function_with_state=state,
                                                                   **test_kwargs)
                                if not isinstance(result, Result):
                                    result = Result.ok(result)
                                if result.info.exec_code == 0:
                                    infos['calls'][function_name] = [test_kwargs, str(result)]
                                    infos['functions_sug'] += 1
                                else:
                                    # NOTE(review): non-zero exec_code still bumps
                                    # functions_sug (preserved as-is) — confirm intended.
                                    infos['functions_sug'] += 1
                                    infos['error'] += 1
                                    infos['callse'][function_name] = [test_kwargs, str(result)]
                            except Exception as e:
                                infos['functions_fatal_error'] += 1
                                infos['callse'][function_name] = [test_kwargs, str(e)]
                            finally:
                                infos['functions_run'] += 1

                if infos['functions_run'] == infos['functions_sug']:
                    all_data['modular_sug'] += 1
                else:
                    all_data['modular_fatal_error'] += 1
                if infos['error'] > 0:
                    all_data['errors'] += infos['error']

                all_data[module_name] = infos
                if infos['coverage'][0] == 0:
                    c = 0
                else:
                    c = infos['coverage'][1] / infos['coverage'][0]
                all_data["coverage"].append(f"{module_name}:{c:.2f}\n")
        # Bug fix: with no matching modules the coverage list is empty and the
        # unconditional division raised ZeroDivisionError.
        if all_data["coverage"]:
            total_coverage = sum(float(t.split(":")[-1]) for t in all_data["coverage"]) / len(all_data["coverage"])
        else:
            total_coverage = 0
        print(
            f"\n{all_data['modular_run']=}\n{all_data['modular_sug']=}\n{all_data['modular_fatal_error']=}\n{total_coverage=}")
        d = analyze_data(all_data)
        return Result.ok(data=all_data, data_info=d)

    async def execute_function_test(self, module_name: str, function_name: str,
                                    function_data: dict, test_kwargs: dict,
                                    profiler: cProfile.Profile) -> tuple[bool, str, dict, float]:
        """Run one registered function once with *test_kwargs*.

        Returns:
            tuple: (success, result-or-error string, the kwargs used,
            elapsed wall-clock seconds).
        """
        t0 = time.time()
        profiling_on = hasattr(self, 'enable_profiling') and self.enable_profiling
        with profile_section(profiler, profiling_on):
            try:
                outcome = await self.a_run_function(
                    (module_name, function_name),
                    tb_run_function_with_state=function_data.get('state'),
                    **test_kwargs
                )
                # Normalize plain return values into a Result wrapper.
                if not isinstance(outcome, Result):
                    outcome = Result.ok(outcome)
                succeeded = outcome.info.exec_code == 0
                return succeeded, str(outcome), test_kwargs, time.time() - t0
            except Exception as err:
                # A raised exception counts as a failed run; report its message.
                return False, str(err), test_kwargs, time.time() - t0

    async def process_function(self, module_name: str, function_name: str,
                               function_data: dict, profiler: cProfile.Profile) -> tuple[str, ModuleInfo]:
        """Run all test cases for a single registered function concurrently and
        aggregate the outcomes into a ModuleInfo.

        Returns:
            tuple: (function_name, ModuleInfo with run/success/error counts,
            call records, coverage counters, and elapsed time).
        """
        start_time = time.time()
        info = ModuleInfo()

        with profile_section(profiler, hasattr(self, 'enable_profiling') and self.enable_profiling):
            # Registry entries that are not dicts are metadata, not functions.
            if not isinstance(function_data, dict):
                return function_name, info

            test = function_data.get('do_test')
            info.coverage[0] += 1

            # do_test=False explicitly opts the function out of testing.
            if test is False:
                return function_name, info

            params = function_data.get('params')
            sig = function_data.get('signature')
            samples = function_data.get('samples')

            # No params -> single empty call; otherwise prefer author samples,
            # falling back to generated cases from the signature.
            test_kwargs_list = [{}] if params is None else (
                samples if samples is not None else generate_test_cases(sig=sig)
            )

            info.coverage[1] += 1

            # Create tasks for all test cases
            tasks = [
                self.execute_function_test(module_name, function_name, function_data, test_kwargs, profiler)
                for test_kwargs in test_kwargs_list
            ]

            # Execute all tests concurrently
            results = await asyncio.gather(*tasks)

            total_execution_time = 0
            for success, result_str, test_kwargs, execution_time in results:
                info.functions_run += 1
                total_execution_time += execution_time

                if success:
                    info.functions_sug += 1
                    info.calls[function_name] = [test_kwargs, result_str]
                else:
                    # NOTE(review): failures also increment functions_sug —
                    # mirrors execute_all_functions_; confirm this is intended.
                    info.functions_sug += 1
                    info.error += 1
                    info.callse[function_name] = [test_kwargs, result_str]

            # Wall-clock time, not the sum of per-test times (tests overlap).
            info.execution_time = time.time() - start_time
            return function_name, info

    async def process_module(self, module_name: str, functions: dict,
                             f_query: str, profiler: cProfile.Profile) -> tuple[str, ModuleInfo]:
        """Test every function of one module (filtered by *f_query*) and merge
        the per-function results into a single ModuleInfo.

        Returns:
            tuple: (module_name, combined ModuleInfo for the module).
        """
        start_time = time.time()

        with profile_section(profiler, hasattr(self, 'enable_profiling') and self.enable_profiling):
            # Bug fix: the previous code did `async with asyncio.Semaphore(...)`
            # around task creation + gather, acquiring the semaphore exactly once
            # and therefore never limiting concurrency. Acquire it per task instead.
            sem = asyncio.Semaphore(mp.cpu_count())

            async def _run_limited(fname, fdata):
                # One-line purpose: run a single function test under the semaphore.
                async with sem:
                    return await self.process_function(module_name, fname, fdata, profiler)

            tasks = [
                _run_limited(fname, fdata)
                for fname, fdata in functions.items()
                if fname.startswith(f_query)
            ]

            if not tasks:
                return module_name, ModuleInfo()

            results = await asyncio.gather(*tasks)

            # Combine results from all functions in the module
            combined_info = ModuleInfo()

            for _, info in results:
                combined_info.functions_run += info.functions_run
                combined_info.functions_fatal_error += info.functions_fatal_error
                combined_info.error += info.error
                combined_info.functions_sug += info.functions_sug
                combined_info.calls.update(info.calls)
                combined_info.callse.update(info.callse)
                combined_info.coverage[0] += info.coverage[0]
                combined_info.coverage[1] += info.coverage[1]

            # Wall-clock time for the whole module (tasks overlap, so summing
            # per-function times would over-count — the old accumulator was unused).
            combined_info.execution_time = time.time() - start_time
            return module_name, combined_info

    async def execute_all_functions(self, m_query='', f_query='', enable_profiling=True):
        """
        Execute all functions with parallel processing and optional profiling.

        Args:
            m_query (str): Module name query filter
            f_query (str): Function name query filter
            enable_profiling (bool): Enable detailed profiling information

        Returns:
            Result: ok-result with the collected ExecutionStats dict as data and
            the output of ``analyze_data`` as data_info.
        """
        print("Executing all functions in parallel" + (" with profiling" if enable_profiling else ""))

        start_time = time.time()
        stats = ExecutionStats()
        items = list(self.functions.items()).copy()

        # Set up profiling
        self.enable_profiling = enable_profiling
        profiler = cProfile.Profile()

        with profile_section(profiler, enable_profiling):
            # Filter modules based on query
            filtered_modules = [
                (mname, mfuncs) for mname, mfuncs in items
                if mname.startswith(m_query)
            ]

            stats.modular_run = len(filtered_modules)

            # Bug fix: `async with asyncio.Semaphore(...)` around gather acquired
            # the semaphore once and never limited the tasks. Acquire per module.
            sem = asyncio.Semaphore(mp.cpu_count())

            async def _run_limited(mname, mfuncs):
                # One-line purpose: process a single module under the semaphore.
                async with sem:
                    return await self.process_module(mname, mfuncs, f_query, profiler)

            tasks = [
                _run_limited(mname, mfuncs)
                for mname, mfuncs in filtered_modules
            ]

            results = await asyncio.gather(*tasks)

            # Combine results and calculate statistics
            for module_name, info in results:
                if info.functions_run == info.functions_sug:
                    stats.modular_sug += 1
                else:
                    stats.modular_fatal_error += 1

                stats.errors += info.error

                # Calculate coverage
                coverage = (info.coverage[1] / info.coverage[0]) if info.coverage[0] > 0 else 0
                stats.coverage.append(f"{module_name}:{coverage:.2f}\n")

                # Store module info
                stats.__dict__[module_name] = info

            # Calculate total coverage
            total_coverage = (
                sum(float(t.split(":")[-1]) for t in stats.coverage) / len(stats.coverage)
                if stats.coverage else 0
            )

            stats.total_execution_time = time.time() - start_time

            # Generate profiling stats if enabled
            if enable_profiling:
                s = io.StringIO()
                ps = pstats.Stats(profiler, stream=s).sort_stats('cumulative')
                ps.print_stats()
                stats.profiling_data = {
                    'detailed_stats': s.getvalue(),
                    'total_time': stats.total_execution_time,
                    'function_count': stats.modular_run,
                    'successful_functions': stats.modular_sug
                }

            print(
                f"\n{stats.modular_run=}"
                f"\n{stats.modular_sug=}"
                f"\n{stats.modular_fatal_error=}"
                f"\n{total_coverage=}"
                f"\nTotal execution time: {stats.total_execution_time:.2f}s"
            )

            if enable_profiling:
                print("\nProfiling Summary:")
                print(f"{'=' * 50}")
                print("Top 10 time-consuming functions:")
                # `ps` exists because this branch shares the enable_profiling guard above.
                ps.print_stats(10)

            analyzed_data = analyze_data(stats.__dict__)
            return Result.ok(data=stats.__dict__, data_info=analyzed_data)
debug property writable

proxi attr

prefix = prefix instance-attribute

proxi attr

a_exit() async

proxi attr

Source code in toolboxv2/utils/system/types.py
1426
1427
async def a_exit(self):
    """proxi attr"""
a_fuction_runner(function, function_data, args, kwargs) async

parameters = function_data.get('params') modular_name = function_data.get('module_name') function_name = function_data.get('func_name') mod_function_name = f"{modular_name}.{function_name}"

proxi attr

Source code in toolboxv2/utils/system/types.py
1491
1492
1493
1494
1495
1496
1497
1498
1499
async def a_fuction_runner(self, function, function_data: dict, args: list, kwargs: dict):
    """
    parameters = function_data.get('params')
    modular_name = function_data.get('module_name')
    function_name = function_data.get('func_name')
    mod_function_name = f"{modular_name}.{function_name}"

    proxi attr
    """
a_remove_mod(mod_name, spec='app', delete=True) async

proxi attr

Source code in toolboxv2/utils/system/types.py
1417
1418
async def a_remove_mod(self, mod_name, spec='app', delete=True):
    """proxi attr"""
a_run_any(mod_function_name, backwords_compability_variabel_string_holder=None, get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs) async

proxi attr

Source code in toolboxv2/utils/system/types.py
1513
1514
1515
1516
1517
1518
async def a_run_any(self, mod_function_name: Enum or str or tuple,
                    backwords_compability_variabel_string_holder=None,
                    get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                    kwargs_=None,
                    *args, **kwargs):
    """proxi attr"""
a_run_function(mod_function_name, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs) async

proxi attr

Source code in toolboxv2/utils/system/types.py
1471
1472
1473
1474
1475
1476
1477
1478
1479
async def a_run_function(self, mod_function_name: Enum or tuple,
                         tb_run_function_with_state=True,
                         tb_run_with_specification='app',
                         args_=None,
                         kwargs_=None,
                         *args,
                         **kwargs) -> Result:

    """proxi attr"""
debug_rains(e)

proxi attr

Source code in toolboxv2/utils/system/types.py
1308
1309
def debug_rains(self, e):
    """proxi attr"""
disconnect(*args, **kwargs) async staticmethod

proxi attr

Source code in toolboxv2/utils/system/types.py
1296
1297
1298
@staticmethod
async def disconnect(*args, **kwargs):
    """proxi attr"""
execute_all_functions(m_query='', f_query='', enable_profiling=True) async

Execute all functions with parallel processing and optional profiling.

Parameters:

Name Type Description Default
m_query str

Module name query filter

''
f_query str

Function name query filter

''
enable_profiling bool

Enable detailed profiling information

True
Source code in toolboxv2/utils/system/types.py
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
async def execute_all_functions(self, m_query='', f_query='', enable_profiling=True):
    """
    Execute all functions with parallel processing and optional profiling.

    Args:
        m_query (str): Module name query filter
        f_query (str): Function name query filter
        enable_profiling (bool): Enable detailed profiling information
    """
    print("Executing all functions in parallel" + (" with profiling" if enable_profiling else ""))

    start_time = time.time()
    stats = ExecutionStats()
    items = list(self.functions.items()).copy()

    # Set up profiling
    self.enable_profiling = enable_profiling
    profiler = cProfile.Profile()

    with profile_section(profiler, enable_profiling):
        # Filter modules based on query
        filtered_modules = [
            (mname, mfuncs) for mname, mfuncs in items
            if mname.startswith(m_query)
        ]

        stats.modular_run = len(filtered_modules)

        # Process all modules concurrently
        async with asyncio.Semaphore(mp.cpu_count()):
            tasks = [
                self.process_module(mname, mfuncs, f_query, profiler)
                for mname, mfuncs in filtered_modules
            ]

            results = await asyncio.gather(*tasks)

        # Combine results and calculate statistics
        for module_name, info in results:
            if info.functions_run == info.functions_sug:
                stats.modular_sug += 1
            else:
                stats.modular_fatal_error += 1

            stats.errors += info.error

            # Calculate coverage
            coverage = (info.coverage[1] / info.coverage[0]) if info.coverage[0] > 0 else 0
            stats.coverage.append(f"{module_name}:{coverage:.2f}\n")

            # Store module info
            stats.__dict__[module_name] = info

        # Calculate total coverage
        total_coverage = (
            sum(float(t.split(":")[-1]) for t in stats.coverage) / len(stats.coverage)
            if stats.coverage else 0
        )

        stats.total_execution_time = time.time() - start_time

        # Generate profiling stats if enabled
        if enable_profiling:
            s = io.StringIO()
            ps = pstats.Stats(profiler, stream=s).sort_stats('cumulative')
            ps.print_stats()
            stats.profiling_data = {
                'detailed_stats': s.getvalue(),
                'total_time': stats.total_execution_time,
                'function_count': stats.modular_run,
                'successful_functions': stats.modular_sug
            }

        print(
            f"\n{stats.modular_run=}"
            f"\n{stats.modular_sug=}"
            f"\n{stats.modular_fatal_error=}"
            f"\n{total_coverage=}"
            f"\nTotal execution time: {stats.total_execution_time:.2f}s"
        )

        if enable_profiling:
            print("\nProfiling Summary:")
            print(f"{'=' * 50}")
            print("Top 10 time-consuming functions:")
            ps.print_stats(10)

        analyzed_data = analyze_data(stats.__dict__)
        return Result.ok(data=stats.__dict__, data_info=analyzed_data)
exit()

proxi attr

Source code in toolboxv2/utils/system/types.py
1420
1421
def exit(self):
    """proxi attr"""
exit_main(*args, **kwargs) staticmethod

proxi attr

Source code in toolboxv2/utils/system/types.py
1284
1285
1286
@staticmethod
def exit_main(*args, **kwargs):
    """proxi attr"""
fuction_runner(function, function_data, args, kwargs, t0=0.0)

parameters = function_data.get('params') modular_name = function_data.get('module_name') function_name = function_data.get('func_name') mod_function_name = f"{modular_name}.{function_name}"

proxi attr

Source code in toolboxv2/utils/system/types.py
1481
1482
1483
1484
1485
1486
1487
1488
1489
def fuction_runner(self, function, function_data: dict, args: list, kwargs: dict, t0=.0):
    """
    parameters = function_data.get('params')
    modular_name = function_data.get('module_name')
    function_name = function_data.get('func_name')
    mod_function_name = f"{modular_name}.{function_name}"

    proxi attr
    """
get_all_mods(working_dir='mods', path_to='./runtime')

proxi attr

Source code in toolboxv2/utils/system/types.py
1391
1392
def get_all_mods(self, working_dir="mods", path_to="./runtime"):
    """proxi attr"""
get_autocompletion_dict()

proxi attr

Source code in toolboxv2/utils/system/types.py
1698
1699
def get_autocompletion_dict(self):
    """proxi attr"""
get_function(name, **kwargs)

Kwargs for _get_function metadata:: return the registered function dictionary stateless: (function_data, None), 0 stateful: (function_data, higher_order_function), 0 state::boolean specification::str default app

Source code in toolboxv2/utils/system/types.py
1432
1433
1434
1435
1436
1437
1438
1439
1440
def get_function(self, name: Enum or tuple, **kwargs):
    """
    Kwargs for _get_function
        metadata:: return the registered function dictionary
            stateless: (function_data, None), 0
            stateful: (function_data, higher_order_function), 0
        state::boolean
            specification::str default app
    """
get_mod(name, spec='app')

proxi attr

Source code in toolboxv2/utils/system/types.py
1520
1521
def get_mod(self, name, spec='app') -> ModuleType or MainToolType:
    """proxi attr"""
get_username(get_input=False, default='loot')

proxi attr

Source code in toolboxv2/utils/system/types.py
1701
1702
def get_username(self, get_input=False, default="loot") -> str:
    """proxi attr"""
hide_console(*args, **kwargs) async staticmethod

proxi attr

Source code in toolboxv2/utils/system/types.py
1288
1289
1290
@staticmethod
async def hide_console(*args, **kwargs):
    """proxi attr"""
inplace_load_instance(mod_name, loc='toolboxv2.mods.', spec='app', save=True)

proxi attr

Source code in toolboxv2/utils/system/types.py
1357
1358
def inplace_load_instance(self, mod_name, loc="toolboxv2.mods.", spec='app', save=True):
    """proxi attr"""
load_all_mods_in_file(working_dir='mods') async

proxi attr

Source code in toolboxv2/utils/system/types.py
1388
1389
async def load_all_mods_in_file(self, working_dir="mods"):
    """proxi attr"""
load_external_mods() async

proxi attr

Source code in toolboxv2/utils/system/types.py
1385
1386
async def load_external_mods(self):
    """proxi attr"""
load_mod(mod_name, mlm='I', **kwargs)

proxi attr

Source code in toolboxv2/utils/system/types.py
1379
1380
def load_mod(self, mod_name: str, mlm='I', **kwargs):
    """proxi attr"""
mod_online(mod_name, installed=False)

proxi attr

Source code in toolboxv2/utils/system/types.py
1366
1367
def mod_online(self, mod_name, installed=False):
    """proxi attr"""
print(text, *args, **kwargs) staticmethod

proxi attr

Source code in toolboxv2/utils/system/types.py
1523
1524
1525
@staticmethod
def print(text, *args, **kwargs):
    """proxi attr"""
print_ok()

proxi attr

Source code in toolboxv2/utils/system/types.py
1404
1405
1406
def print_ok(self):
    """proxi attr"""
    self.logger.info("OK")
reload_mod(mod_name, spec='app', is_file=True, loc='toolboxv2.mods.')

proxi attr

Source code in toolboxv2/utils/system/types.py
1408
1409
def reload_mod(self, mod_name, spec='app', is_file=True, loc="toolboxv2.mods."):
    """proxi attr"""
remove_mod(mod_name, spec='app', delete=True)

proxi attr

Source code in toolboxv2/utils/system/types.py
1414
1415
def remove_mod(self, mod_name, spec='app', delete=True):
    """proxi attr"""
rrun_flows(name, **kwargs)

proxi attr

Source code in toolboxv2/utils/system/types.py
1317
1318
def rrun_flows(self, name, **kwargs):
    """proxi attr"""
run_a_from_sync(function, *args)

run an async function

Source code in toolboxv2/utils/system/types.py
1442
1443
1444
1445
def run_a_from_sync(self, function, *args):
    """
    run a async fuction
    """
run_any(mod_function_name, backwords_compability_variabel_string_holder=None, get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs)

proxi attr

Source code in toolboxv2/utils/system/types.py
1507
1508
1509
1510
1511
def run_any(self, mod_function_name: Enum or str or tuple, backwords_compability_variabel_string_holder=None,
            get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
            kwargs_=None,
            *args, **kwargs):
    """proxi attr"""
run_bg_task(task)

run an async function

Source code in toolboxv2/utils/system/types.py
1457
1458
1459
1460
def run_bg_task(self, task):
    """
            run a async fuction
            """
run_bg_task_advanced(task, *args, **kwargs)

proxi attr

Source code in toolboxv2/utils/system/types.py
1447
1448
1449
1450
def run_bg_task_advanced(self, task, *args, **kwargs):
    """
    proxi attr
    """
run_flows(name, **kwargs)

proxi attr

Source code in toolboxv2/utils/system/types.py
1314
1315
def run_flows(self, name, **kwargs):
    """proxi attr"""
run_function(mod_function_name, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None, kwargs_=None, *args, **kwargs)

proxi attr

Source code in toolboxv2/utils/system/types.py
1461
1462
1463
1464
1465
1466
1467
1468
1469
def run_function(self, mod_function_name: Enum or tuple,
                 tb_run_function_with_state=True,
                 tb_run_with_specification='app',
                 args_=None,
                 kwargs_=None,
                 *args,
                 **kwargs) -> Result:

    """proxi attr"""
run_http(mod_function_name, function_name=None, method='GET', args_=None, kwargs_=None, *args, **kwargs) async

run a function remote via http / https

Source code in toolboxv2/utils/system/types.py
1501
1502
1503
1504
1505
async def run_http(self, mod_function_name: Enum or str or tuple, function_name=None, method="GET",
                   args_=None,
                   kwargs_=None,
                   *args, **kwargs):
    """run a function remote via http / https"""
save_autocompletion_dict()

proxi attr

Source code in toolboxv2/utils/system/types.py
1695
1696
def save_autocompletion_dict(self):
    """proxi attr"""
save_exit()

proxi attr

Source code in toolboxv2/utils/system/types.py
1376
1377
def save_exit(self):
    """proxi attr"""
save_initialized_module(tools_class, spec)

proxi attr

Source code in toolboxv2/utils/system/types.py
1363
1364
def save_initialized_module(self, tools_class, spec):
    """proxi attr"""
save_instance(instance, modular_id, spec='app', instance_type='file/application', tools_class=None)

proxi attr

Source code in toolboxv2/utils/system/types.py
1360
1361
def save_instance(self, instance, modular_id, spec='app', instance_type="file/application", tools_class=None):
    """proxi attr"""
save_load(modname, spec='app')

proxi attr

Source code in toolboxv2/utils/system/types.py
1429
1430
def save_load(self, modname, spec='app'):
    """proxi attr"""
save_registry_as_enums(directory, filename)

proxi attr

Source code in toolboxv2/utils/system/types.py
1704
1705
def save_registry_as_enums(self, directory: str, filename: str):
    """proxi attr"""
set_flows(r)

proxi attr

Source code in toolboxv2/utils/system/types.py
1311
1312
def set_flows(self, r):
    """proxi attr"""
set_logger(debug=False)

proxi attr

Source code in toolboxv2/utils/system/types.py
1300
1301
def set_logger(self, debug=False):
    """proxi attr"""
show_console(*args, **kwargs) async staticmethod

proxi attr

Source code in toolboxv2/utils/system/types.py
1292
1293
1294
@staticmethod
async def show_console(*args, **kwargs):
    """proxi attr"""
sprint(text, *args, **kwargs) staticmethod

proxi attr

Source code in toolboxv2/utils/system/types.py
1527
1528
1529
@staticmethod
def sprint(text, *args, **kwargs):
    """proxi attr"""
tb(name=None, mod_name='', helper='', version=None, test=True, restrict_in_virtual_mode=False, api=False, initial=False, exit_f=False, test_only=False, memory_cache=False, file_cache=False, row=False, request_as_kwarg=False, state=None, level=0, memory_cache_max_size=100, memory_cache_ttl=300, samples=None, interface=None, pre_compute=None, post_compute=None, api_methods=None, websocket_handler=None)

A decorator for registering and configuring functions within a module.

This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

Parameters:

Name Type Description Default
name str

The name to register the function under. Defaults to the function's own name.

None
mod_name str

The name of the module the function belongs to.

''
helper str

A helper string providing additional information about the function.

''
version str or None

The version of the function or module.

None
test bool

Flag to indicate if the function is for testing purposes.

True
restrict_in_virtual_mode bool

Flag to restrict the function in virtual mode.

False
api bool

Flag to indicate if the function is part of an API.

False
initial bool

Flag to indicate if the function should be executed at initialization.

False
exit_f bool

Flag to indicate if the function should be executed at exit.

False
test_only bool

Flag to indicate if the function should only be used for testing.

False
memory_cache bool

Flag to enable memory caching for the function.

False
request_as_kwarg bool

Flag to pass the request object as a keyword argument when the function is called from the API.

False
file_cache bool

Flag to enable file caching for the function.

False
row bool

Whether to return raw data instead of auto-wrapping the result in a Result type. False (default) means the result is wrapped.

False
state bool or None

Flag to indicate if the function maintains state.

None
level int

The level of the function, used for prioritization or categorization.

0
memory_cache_max_size int

Maximum size of the memory cache.

100
memory_cache_ttl int

Time-to-live for the memory cache entries.

300
samples list or dict or None

Samples or examples of function usage.

None
interface str

The interface type for the function.

None
pre_compute callable

A function to be called before the main function.

None
post_compute callable

A function to be called after the main function.

None
api_methods list[str]

Defaults to ["AUTO"] (GET if there are no params, POST if there are); may also be GET, POST, PUT or DELETE.

None

Returns:

Name Type Description
function

The decorated function with additional processing and registration capabilities.

Source code in toolboxv2/utils/system/types.py
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
def tb(self, name=None,
       mod_name: str = "",
       helper: str = "",
       version: str or None = None,
       test: bool = True,
       restrict_in_virtual_mode: bool = False,
       api: bool = False,
       initial: bool = False,
       exit_f: bool = False,
       test_only: bool = False,
       memory_cache: bool = False,
       file_cache: bool = False,
       row=False,
       request_as_kwarg: bool = False,
       state: bool or None = None,
       level: int = 0,
       memory_cache_max_size: int = 100,
       memory_cache_ttl: int = 300,
       samples: list or dict or None = None,
       interface: ToolBoxInterfaces or None or str = None,
       pre_compute=None,
       post_compute=None,
       api_methods=None,
       websocket_handler: str | None = None,
       ):
    """Register and configure a function within a module.

    Decorator factory: wraps functions with caching, API conversion and
    lifecycle management (initialization/exit hooks), and registers the
    function in the module's function registry.

    Args:
        name (str, optional): Name to register the function under.
            Defaults to the function's own name.
        mod_name (str, optional): Name of the module the function belongs to.
        helper (str, optional): Additional help/description text.
        version (str or None, optional): Version of the function or module.
        test (bool, optional): Whether the function is for testing purposes.
        restrict_in_virtual_mode (bool, optional): Restrict the function in
            virtual mode.
        api (bool, optional): Whether the function is exposed via the API.
        initial (bool, optional): Execute the function at initialization.
        exit_f (bool, optional): Execute the function at exit.
        test_only (bool, optional): Only usable in test app instances.
        memory_cache (bool, optional): Enable memory caching.
        file_cache (bool, optional): Enable file caching.
        row (bool, optional): Return raw data instead of auto-wrapping the
            result in a Result type. False (default) wraps the result.
        request_as_kwarg (bool, optional): Pass the request object as a
            keyword argument when the function is called from the API.
        state (bool or None, optional): Whether the function maintains state.
        level (int, optional): Function level, used for prioritization or
            categorization.
        memory_cache_max_size (int, optional): Maximum memory-cache size.
        memory_cache_ttl (int, optional): Time-to-live for memory-cache
            entries, in seconds.
        samples (list or dict or None, optional): Usage samples/examples.
        interface (str, optional): Interface type for the function.
        pre_compute (callable, optional): Called before the main function.
        post_compute (callable, optional): Called after the main function.
        api_methods (list[str], optional): Defaults to ["AUTO"] (GET if no
            params, POST if params); may be GET, POST, PUT or DELETE.
        websocket_handler (str or None, optional): Websocket handler name.

    Returns:
        function: The decorated function with additional processing and
        registration capabilities.
    """
    # Default to the generic "tb" interface when none was requested.
    if interface is None:
        interface = "tb"
    # Outside test app instances, test-only functions are not registered:
    # the returned stand-in decorator swallows the function entirely.
    if test_only and 'test' not in self.id:
        return lambda *args, **kwargs: args
    # NOTE(review): `api_methods` and `websocket_handler` are accepted above
    # but not forwarded to _create_decorator — confirm whether intentional.
    return self._create_decorator(interface,
                                  name,
                                  mod_name,
                                  level=level,
                                  restrict_in_virtual_mode=restrict_in_virtual_mode,
                                  helper=helper,
                                  api=api,
                                  version=version,
                                  initial=initial,
                                  exit_f=exit_f,
                                  test=test,
                                  samples=samples,
                                  state=state,
                                  pre_compute=pre_compute,
                                  post_compute=post_compute,
                                  memory_cache=memory_cache,
                                  file_cache=file_cache,
                                  row=row,
                                  request_as_kwarg=request_as_kwarg,
                                  memory_cache_max_size=memory_cache_max_size,
                                  memory_cache_ttl=memory_cache_ttl)
wait_for_bg_tasks(timeout=None)

proxi attr

Source code in toolboxv2/utils/system/types.py
1452
1453
1454
1455
def wait_for_bg_tasks(self, timeout=None):
    """
    proxi attr
    """
watch_mod(mod_name, spec='app', loc='toolboxv2.mods.', use_thread=True, path_name=None)

proxi attr

Source code in toolboxv2/utils/system/types.py
1411
1412
def watch_mod(self, mod_name, spec='app', loc="toolboxv2.mods.", use_thread=True, path_name=None):
    """proxi attr"""
web_context()

returns the build index ( toolbox web component )

Source code in toolboxv2/utils/system/types.py
1423
1424
def web_context(self) -> str:
    """returns the build index ( toolbox web component )"""
Headers dataclass

Class representing HTTP headers with strongly typed common fields.

Source code in toolboxv2/utils/system/types.py
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
@dataclass
class Headers:
    """HTTP headers with strongly typed common fields.

    Standard header names are exposed as snake_case attributes (hyphens
    mapped to underscores); anything unrecognized lives in ``extra_headers``
    keyed by its original hyphenated name.
    """
    # General Headers
    accept: None | str= None
    accept_charset: None | str= None
    accept_encoding: None | str= None
    accept_language: None | str= None
    accept_ranges: None | str= None
    access_control_allow_credentials: None | str= None
    access_control_allow_headers: None | str= None
    access_control_allow_methods: None | str= None
    access_control_allow_origin: None | str= None
    access_control_expose_headers: None | str= None
    access_control_max_age: None | str= None
    access_control_request_headers: None | str= None
    access_control_request_method: None | str= None
    age: None | str= None
    allow: None | str= None
    alt_svc: None | str= None
    authorization: None | str= None
    cache_control: None | str= None
    clear_site_data: None | str= None
    connection: None | str= None
    content_disposition: None | str= None
    content_encoding: None | str= None
    content_language: None | str= None
    content_length: None | str= None
    content_location: None | str= None
    content_range: None | str= None
    content_security_policy: None | str= None
    content_security_policy_report_only: None | str= None
    content_type: None | str= None
    cookie: None | str= None
    cross_origin_embedder_policy: None | str= None
    cross_origin_opener_policy: None | str= None
    cross_origin_resource_policy: None | str= None
    date: None | str= None
    device_memory: None | str= None
    digest: None | str= None
    dnt: None | str= None
    dpr: None | str= None
    etag: None | str= None
    expect: None | str= None
    expires: None | str= None
    feature_policy: None | str= None
    forwarded: None | str= None
    from_header: None | str= None  # 'from' is a Python keyword
    host: None | str= None
    if_match: None | str= None
    if_modified_since: None | str= None
    if_none_match: None | str= None
    if_range: None | str= None
    if_unmodified_since: None | str= None
    keep_alive: None | str= None
    large_allocation: None | str= None
    last_modified: None | str= None
    link: None | str= None
    location: None | str= None
    max_forwards: None | str= None
    origin: None | str= None
    pragma: None | str= None
    proxy_authenticate: None | str= None
    proxy_authorization: None | str= None
    public_key_pins: None | str= None
    public_key_pins_report_only: None | str= None
    range: None | str= None
    referer: None | str= None
    referrer_policy: None | str= None
    retry_after: None | str= None
    save_data: None | str= None
    sec_fetch_dest: None | str= None
    sec_fetch_mode: None | str= None
    sec_fetch_site: None | str= None
    sec_fetch_user: None | str= None
    sec_websocket_accept: None | str= None
    sec_websocket_extensions: None | str= None
    sec_websocket_key: None | str= None
    sec_websocket_protocol: None | str= None
    sec_websocket_version: None | str= None
    server: None | str= None
    server_timing: None | str= None
    service_worker_allowed: None | str= None
    set_cookie: None | str= None
    sourcemap: None | str= None
    strict_transport_security: None | str= None
    te: None | str= None
    timing_allow_origin: None | str= None
    tk: None | str= None
    trailer: None | str= None
    transfer_encoding: None | str= None
    upgrade: None | str= None
    upgrade_insecure_requests: None | str= None
    user_agent: None | str= None
    vary: None | str= None
    via: None | str= None
    warning: None | str= None
    www_authenticate: None | str= None
    x_content_type_options: None | str= None
    x_dns_prefetch_control: None | str= None
    x_forwarded_for: None | str= None
    x_forwarded_host: None | str= None
    x_forwarded_proto: None | str= None
    x_frame_options: None | str= None
    x_xss_protection: None | str= None

    # Browser-specific and custom headers
    sec_ch_ua: None | str= None
    sec_ch_ua_mobile: None | str= None
    sec_ch_ua_platform: None | str= None
    sec_ch_ua_arch: None | str= None
    sec_ch_ua_bitness: None | str= None
    sec_ch_ua_full_version: None | str= None
    sec_ch_ua_full_version_list: None | str= None
    sec_ch_ua_platform_version: None | str= None

    # HTMX specific headers
    hx_boosted: None | str= None
    hx_current_url: None | str= None
    hx_history_restore_request: None | str= None
    hx_prompt: None | str= None
    hx_request: None | str= None
    hx_target: None | str= None
    hx_trigger: None | str= None
    hx_trigger_name: None | str= None

    # Additional fields can be stored in extra_headers
    extra_headers: dict[str, str] = field(default_factory=dict)

    def __post_init__(self):
        """Normalize instance state after dataclass __init__.

        Moves a raw ``from`` entry onto ``from_header`` and relocates any
        attribute not declared in the annotations into ``extra_headers``
        (keyed back to its hyphenated form).
        """
        # Handle the 'from' header specifically since it's a Python keyword
        if 'from' in self.__dict__:
            self.from_header = self.__dict__.pop('from')

        # Store any attributes that weren't explicitly defined in extra_headers
        # (iterate over a snapshot since we delete attributes as we go)
        all_attrs = self.__annotations__.keys()
        for key in list(self.__dict__.keys()):
            if key not in all_attrs and key != "extra_headers":
                self.extra_headers[key.replace("_", "-")] = getattr(self, key)
                delattr(self, key)

    @classmethod
    def from_dict(cls, headers_dict: dict[str, str]) -> 'Headers':
        """Create a Headers instance from a dictionary of raw header names."""
        # Convert header keys from hyphenated to underscore format for Python attributes
        processed_headers = {}
        extra_headers = {}

        for key, value in headers_dict.items():
            # Handle 'from' header specifically
            if key.lower() == 'from':
                processed_headers['from_header'] = value
                continue

            python_key = key.replace("-", "_").lower()
            if python_key in cls.__annotations__ and python_key != "extra_headers":
                processed_headers[python_key] = value
            else:
                # Unknown headers keep their original (hyphenated) key
                extra_headers[key] = value

        return cls(**processed_headers, extra_headers=extra_headers)

    def to_dict(self) -> dict[str, str]:
        """Convert the Headers object back to a dictionary.

        Only non-None fields are emitted; attribute names are mapped back
        to their hyphenated header form, then extra_headers are merged in.
        """
        result = {}

        # Add regular attributes
        for key, value in self.__dict__.items():
            if key != "extra_headers" and value is not None:
                # Handle from_header specially
                if key == "from_header":
                    result["from"] = value
                else:
                    result[key.replace("_", "-")] = value

        # Add extra headers
        result.update(self.extra_headers)

        return result
__post_init__()

Convert header keys with hyphens to underscores for attribute access.

Source code in toolboxv2/utils/system/types.py
160
161
162
163
164
165
166
167
168
169
170
171
def __post_init__(self):
    """Convert header keys with hyphens to underscores for attribute access."""
    # Handle the 'from' header specifically since it's a Python keyword
    if 'from' in self.__dict__:
        self.from_header = self.__dict__.pop('from')

    # Store any attributes that weren't explicitly defined in extra_headers
    all_attrs = self.__annotations__.keys()
    for key in list(self.__dict__.keys()):
        if key not in all_attrs and key != "extra_headers":
            self.extra_headers[key.replace("_", "-")] = getattr(self, key)
            delattr(self, key)
from_dict(headers_dict) classmethod

Create a Headers instance from a dictionary.

Source code in toolboxv2/utils/system/types.py
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
@classmethod
def from_dict(cls, headers_dict: dict[str, str]) -> 'Headers':
    """Create a Headers instance from a dictionary."""
    # Convert header keys from hyphenated to underscore format for Python attributes
    processed_headers = {}
    extra_headers = {}

    for key, value in headers_dict.items():
        # Handle 'from' header specifically
        if key.lower() == 'from':
            processed_headers['from_header'] = value
            continue

        python_key = key.replace("-", "_").lower()
        if python_key in cls.__annotations__ and python_key != "extra_headers":
            processed_headers[python_key] = value
        else:
            extra_headers[key] = value

    return cls(**processed_headers, extra_headers=extra_headers)
to_dict()

Convert the Headers object back to a dictionary.

Source code in toolboxv2/utils/system/types.py
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
def to_dict(self) -> dict[str, str]:
    """Convert the Headers object back to a dictionary."""
    result = {}

    # Add regular attributes
    for key, value in self.__dict__.items():
        if key != "extra_headers" and value is not None:
            # Handle from_header specially
            if key == "from_header":
                result["from"] = value
            else:
                result[key.replace("_", "-")] = value

    # Add extra headers
    result.update(self.extra_headers)

    return result
MainToolType
Source code in toolboxv2/utils/system/types.py
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
class MainToolType:
    """Type/protocol stub describing the MainTool interface (proxy attrs)."""
    toolID: str
    app: A  # owning application instance
    interface: ToolBoxInterfaces
    spec: str

    version: str
    tools: dict  # legacy
    name: str
    logger: logging  # NOTE(review): annotated with the logging *module* — presumably a Logger instance; confirm
    color: str
    todo: Callable
    _on_exit: Callable  # callback invoked on tool shutdown
    stuf: bool
    config: dict
    user: U | None
    description: str

    @staticmethod
    def return_result(error: ToolBoxError = ToolBoxError.none,
                      exec_code: int = 0,
                      help_text: str = "",
                      data_info=None,
                      data=None,
                      data_to=None) -> Result:
        """proxi attr"""

    def load(self):
        """proxi attr"""

    def print(self, message, end="\n", **kwargs):
        """proxi attr"""

    def add_str_to_config(self, command):
        # Expects a (key, value) pair; anything else is rejected with a log entry.
        if len(command) != 2:
            self.logger.error('Invalid command must be key value')
            return False
        self.config[command[0]] = command[1]

    def webInstall(self, user_instance, construct_render) -> str:
        """Returns a web installer for the given user instance and construct render template"""

    async def get_user(self, username: str) -> Result:
        # Delegates lookup to the CloudM auth manager module via the app runner.
        return self.app.a_run_any(CLOUDM_AUTHMANAGER.GET_USER_BY_NAME, username=username, get_results=True)
load()

proxi attr

Source code in toolboxv2/utils/system/types.py
1183
1184
def load(self):
    """proxi attr"""
print(message, end='\n', **kwargs)

proxi attr

Source code in toolboxv2/utils/system/types.py
1186
1187
def print(self, message, end="\n", **kwargs):
    """proxi attr"""
return_result(error=ToolBoxError.none, exec_code=0, help_text='', data_info=None, data=None, data_to=None) staticmethod

proxi attr

Source code in toolboxv2/utils/system/types.py
1174
1175
1176
1177
1178
1179
1180
1181
@staticmethod
def return_result(error: ToolBoxError = ToolBoxError.none,
                  exec_code: int = 0,
                  help_text: str = "",
                  data_info=None,
                  data=None,
                  data_to=None) -> Result:
    """proxi attr"""
webInstall(user_instance, construct_render)

Returns a web installer for the given user instance and construct render template

Source code in toolboxv2/utils/system/types.py
1195
1196
def webInstall(self, user_instance, construct_render) -> str:
    """"Returns a web installer for the given user instance and construct render template"""
Request dataclass

Class representing an HTTP request.

Source code in toolboxv2/utils/system/types.py
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
@dataclass
class Request:
    """Class representing an HTTP request."""
    content_type: str
    headers: Headers
    method: str
    path: str
    query_params: dict[str, Any] = field(default_factory=dict)
    form_data: dict[str, Any] | None = None
    body: Any | None = None

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> 'Request':
        """Create a Request instance from a dictionary."""
        # Assemble constructor arguments in one place, delegating header
        # parsing to Headers.from_dict.
        kwargs = {
            'content_type': data.get('content_type', ''),
            'headers': Headers.from_dict(data.get('headers', {})),
            'method': data.get('method', ''),
            'path': data.get('path', ''),
            'query_params': data.get('query_params', {}),
            'form_data': data.get('form_data'),
            'body': data.get('body'),
        }
        return cls(**kwargs)

    def to_dict(self) -> dict[str, Any]:
        """Convert the Request object back to a dictionary."""
        serialized = {
            'content_type': self.content_type,
            'headers': self.headers.to_dict(),
            'method': self.method,
            'path': self.path,
            'query_params': self.query_params,
        }

        # Optional payload fields are only emitted when present.
        for key, value in (('form_data', self.form_data), ('body', self.body)):
            if value is not None:
                serialized[key] = value

        return serialized
from_dict(data) classmethod

Create a Request instance from a dictionary.

Source code in toolboxv2/utils/system/types.py
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
@classmethod
def from_dict(cls, data: dict[str, Any]) -> 'Request':
    """Create a Request instance from a dictionary."""
    headers = Headers.from_dict(data.get('headers', {}))

    # Extract other fields
    return cls(
        content_type=data.get('content_type', ''),
        headers=headers,
        method=data.get('method', ''),
        path=data.get('path', ''),
        query_params=data.get('query_params', {}),
        form_data=data.get('form_data'),
        body=data.get('body')
    )
to_dict()

Convert the Request object back to a dictionary.

Source code in toolboxv2/utils/system/types.py
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
def to_dict(self) -> dict[str, Any]:
    """Convert the Request object back to a dictionary."""
    result = {
        'content_type': self.content_type,
        'headers': self.headers.to_dict(),
        'method': self.method,
        'path': self.path,
        'query_params': self.query_params,
    }

    if self.form_data is not None:
        result['form_data'] = self.form_data

    if self.body is not None:
        result['body'] = self.body

    return result
RequestData dataclass

Main class representing the complete request data structure.

Source code in toolboxv2/utils/system/types.py
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
@dataclass
class RequestData:
    """Main class representing the complete request data structure."""
    request: Request
    session: Session
    session_id: str

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> 'RequestData':
        """Create a RequestData instance from a dictionary."""
        return cls(
            request=Request.from_dict(data.get('request', {})),
            session=Session.from_dict(data.get('session', {})),
            session_id=data.get('session_id', '')
        )

    def to_dict(self) -> dict[str, Any]:
        """Convert the RequestData object back to a dictionary."""
        return {
            'request': self.request.to_dict(),
            'session': self.session.to_dict(),
            'session_id': self.session_id
        }

    def __getattr__(self, name: str) -> Any:
        """Delegate unknown attributes to the `request` object."""
        # Only invoked when the attribute does not exist directly on
        # RequestData (and is not `session` or `session_id`), since
        # __getattr__ only fires on failed normal lookup.
        if hasattr(self.request, name):
            return getattr(self.request, name)
        raise AttributeError(f"'RequestData' object has no attribute '{name}'")

    @classmethod
    def moc(cls):
        """Build a canned RequestData instance with representative browser/HTMX
        data — appears to be a mock fixture for testing/demo use."""
        return cls(
            request=Request.from_dict({
                'content_type': 'application/x-www-form-urlencoded',
                'headers': {
                    'accept': '*/*',
                    'accept-encoding': 'gzip, deflate, br, zstd',
                    'accept-language': 'de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7',
                    'connection': 'keep-alive',
                    'content-length': '107',
                    'content-type': 'application/x-www-form-urlencoded',
                    'cookie': 'session=abc123',
                    'host': 'localhost:8080',
                    'hx-current-url': 'http://localhost:8080/api/TruthSeeker/get_main_ui',
                    'hx-request': 'true',
                    'hx-target': 'estimates-guest_1fc2c9',
                    'hx-trigger': 'config-form-guest_1fc2c9',
                    'origin': 'http://localhost:8080',
                    'referer': 'http://localhost:8080/api/TruthSeeker/get_main_ui',
                    'sec-ch-ua': '"Chromium";v="134", "Not:A-Brand";v="24", "Google Chrome";v="134"',
                    'sec-ch-ua-mobile': '?0',
                    'sec-ch-ua-platform': '"Windows"',
                    'sec-fetch-dest': 'empty',
                    'sec-fetch-mode': 'cors',
                    'sec-fetch-site': 'same-origin',
                    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
                },
                'method': 'POST',
                'path': '/api/TruthSeeker/update_estimates',
                'query_params': {},
                'form_data': {
                    'param1': 'value1',
                    'param2': 'value2'
                }
            }),
            session=Session.from_dict({
                'SiID': '29a2e258e18252e2afd5ff943523f09c82f1bb9adfe382a6f33fc6a8381de898',
                'level': '1',
                'spec': '74eed1c8de06886842e235486c3c2fd6bcd60586998ac5beb87f13c0d1750e1d',
                'user_name': 'root',
                'custom_field': 'custom_value'
            }),
            session_id='0x29dd1ac0d1e30d3f'
        )
__getattr__(name)

Delegate unknown attributes to the request object.

Source code in toolboxv2/utils/system/types.py
325
326
327
328
329
330
331
def __getattr__(self, name: str) -> Any:
    """Delegate unknown attributes to the `request` object."""
    # Nur wenn das Attribut nicht direkt in RequestData existiert
    # und auch nicht `session` oder `session_id` ist
    if hasattr(self.request, name):
        return getattr(self.request, name)
    raise AttributeError(f"'RequestData' object has no attribute '{name}'")
from_dict(data) classmethod

Create a RequestData instance from a dictionary.

Source code in toolboxv2/utils/system/types.py
308
309
310
311
312
313
314
315
@classmethod
def from_dict(cls, data: dict[str, Any]) -> 'RequestData':
    """Build a RequestData from its dictionary representation."""
    # Missing sub-dicts fall back to empty mappings so the nested
    # from_dict constructors always receive a dict.
    request_payload = data.get('request', {})
    session_payload = data.get('session', {})
    return cls(
        request=Request.from_dict(request_payload),
        session=Session.from_dict(session_payload),
        session_id=data.get('session_id', ''),
    )
to_dict()

Convert the RequestData object back to a dictionary.

Source code in toolboxv2/utils/system/types.py
317
318
319
320
321
322
323
def to_dict(self) -> dict[str, Any]:
    """Serialize this RequestData back into a plain dictionary."""
    # Delegate to the nested objects' own serializers; key order matches
    # the original representation (request, session, session_id).
    return dict(
        request=self.request.to_dict(),
        session=self.session.to_dict(),
        session_id=self.session_id,
    )
Result
Source code in toolboxv2/utils/system/types.py
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
class Result:
    """Canonical return-value wrapper for toolbox functions.

    Couples an error state (``ToolBoxError``), the payload
    (``ToolBoxResult``) and execution metadata (``ToolBoxInfo``) with an
    optional, write-once ``origin`` marker.  Factory classmethods build
    the common response shapes (json, text, html, binary, file, redirect,
    stream, sse, ...) consumed by the Rust transport layer.
    """
    # Background-task handle; set via `task()` and read via `bg_task`.
    # Class-level default is None; `task()` rebinds per instance.
    _task = None
    def __init__(self,
                 error: ToolBoxError,
                 result: ToolBoxResult,
                 info: ToolBoxInfo,
                 origin: Any | None = None,
                 ):
        self.error: ToolBoxError = error
        self.result: ToolBoxResult = result
        self.info: ToolBoxInfo = info
        self.origin = origin

    def as_result(self):
        """Return this object unchanged (it already is a Result)."""
        return self

    def as_dict(self):
        """Convert the Result into a plain, JSON-serializable dict.

        Enum members are unwrapped to their ``.value``; missing result/info
        sections serialize as None.
        """
        return {
            "error":self.error.value if isinstance(self.error, Enum) else self.error,
        "result" : {
            "data_to":self.result.data_to.value if isinstance(self.result.data_to, Enum) else self.result.data_to,
            "data_info":self.result.data_info,
            "data":self.result.data,
            "data_type":self.result.data_type
        } if self.result else None,
        "info" : {
            "exec_code" : self.info.exec_code,  # exec_code doubles as the HTTP response code
        "help_text" : self.info.help_text
        } if self.info else None,
        "origin" : self.origin
        }

    def set_origin(self, origin):
        """Set the origin exactly once (fluent); raises if already set."""
        if self.origin is not None:
            raise ValueError("You cannot Change the origin of a Result!")
        self.origin = origin
        return self

    def set_dir_origin(self, name, extras="assets/"):
        """Set the origin to a module asset directory path (write-once, fluent)."""
        if self.origin is not None:
            raise ValueError("You cannot Change the origin of a Result!")
        self.origin = f"mods/{name}/{extras}"
        return self

    def is_error(self):
        """Report whether this Result represents a failure.

        Delegates to a nested Result payload when present.  Otherwise a
        ``none`` error, or an exec_code of 0 or 200, counts as success.
        """
        if _test_is_result(self.result.data):
            return self.result.data.is_error()
        if self.error == ToolBoxError.none:
            return False
        if self.info.exec_code == 0:
            return False
        return self.info.exec_code != 200

    def is_ok(self):
        """Inverse of :meth:`is_error`."""
        return not self.is_error()

    def is_data(self):
        """True when a payload is attached (``result.data`` is not None)."""
        return self.result.data is not None

    def to_api_result(self):
        """Convert to the ApiResult model mirror of this object."""
        # print(f" error={self.error}, result= {self.result}, info= {self.info}, origin= {self.origin}")
        return ApiResult(
            error=self.error.value if isinstance(self.error, Enum) else self.error,
            result=ToolBoxResultBM(
                data_to=self.result.data_to.value if isinstance(self.result.data_to, Enum) else self.result.data_to,
                data_info=self.result.data_info,
                data=self.result.data,
                data_type=self.result.data_type
            ) if self.result else None,
            info=ToolBoxInfoBM(
                exec_code=self.info.exec_code,  # exec_code doubles as the HTTP response code
                help_text=self.info.help_text
            ) if self.info else None,
            origin=self.origin
        )

    def task(self, task):
        """Attach a background-task handle (fluent; exposed via ``bg_task``)."""
        self._task = task
        return self

    @staticmethod
    def result_from_dict(error: str, result: dict, info: dict, origin: list or None or str):
        """Rebuild a Result from already-unpacked dict parts, via ApiResult.

        Missing sections fall back to '404' placeholder payloads.

        NOTE(review): the ``isinstance(..., Enum)`` conditionals below are
        no-ops — both branches yield the same value; confirm intent.
        NOTE(review): the annotation ``list or None or str`` evaluates to
        just ``list``; ``list | str | None`` is likely what was meant.
        """
        # print(f" error={self.error}, result= {self.result}, info= {self.info}, origin= {self.origin}")
        return ApiResult(
            error=error if isinstance(error, Enum) else error,
            result=ToolBoxResultBM(
                data_to=result.get('data_to') if isinstance(result.get('data_to'), Enum) else result.get('data_to'),
                data_info=result.get('data_info', '404'),
                data=result.get('data'),
                data_type=result.get('data_type', '404'),
            ) if result else ToolBoxResultBM(
                data_to=ToolBoxInterfaces.cli.value,
                data_info='',
                data='404',
                data_type='404',
            ),
            info=ToolBoxInfoBM(
                exec_code=info.get('exec_code', 404),
                help_text=info.get('help_text', '404')
            ) if info else ToolBoxInfoBM(
                exec_code=404,
                help_text='404'
            ),
            origin=origin
        ).as_result()

    @classmethod
    def stream(cls,
               stream_generator: Any,  # Renamed from source for clarity
               content_type: str = "text/event-stream",  # Default to SSE
               headers: dict | None = None,
               info: str = "OK",
               interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
               cleanup_func: Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None = None):
        """
        Create a streaming response Result. Handles SSE and other stream types.

        Args:
            stream_generator: Any stream source (async generator, sync generator, iterable, or single item).
            content_type: Content-Type header (default: text/event-stream for SSE).
            headers: Additional HTTP headers for the response.
            info: Help text for the result.
            interface: Interface to send data to.
            cleanup_func: Optional function for cleanup.

        Returns:
            A Result object configured for streaming.
        """
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=0, help_text=info)

        final_generator: AsyncGenerator[str, None]

        if content_type == "text/event-stream":
            # For SSE, always use SSEGenerator.create_sse_stream to wrap the source.
            # SSEGenerator.create_sse_stream handles various types of stream_generator internally.
            final_generator = SSEGenerator.create_sse_stream(source=stream_generator, cleanup_func=cleanup_func)

            # Standard SSE headers for the HTTP response itself
            # These will be stored in the Result object. Rust side decides how to use them.
            standard_sse_headers = {
                "Cache-Control": "no-cache",  # SSE specific
                "Connection": "keep-alive",  # SSE specific
                "X-Accel-Buffering": "no",  # Useful for proxies with SSE
                # Content-Type is implicitly text/event-stream, will be in streaming_data below
            }
            all_response_headers = standard_sse_headers.copy()
            if headers:
                all_response_headers.update(headers)
        else:
            # For non-SSE streams.
            # If stream_generator is sync, wrap it to be async.
            # If already async or single item, it will be handled.
            # Rust's stream_generator in ToolboxClient seems to handle both sync/async Python generators.
            # For consistency with how SSEGenerator does it, we can wrap sync ones.
            if inspect.isgenerator(stream_generator) or \
                (not isinstance(stream_generator, str) and hasattr(stream_generator, '__iter__')):
                final_generator = SSEGenerator.wrap_sync_generator(stream_generator)  # Simple async wrapper
            elif inspect.isasyncgen(stream_generator):
                final_generator = stream_generator
            else:  # Single item or string
                async def _single_item_gen():
                    yield stream_generator

                final_generator = _single_item_gen()
            all_response_headers = headers if headers else {}

        # Prepare streaming data to be stored in the Result object
        streaming_data = {
            "type": "stream",  # Indicator for Rust side
            "generator": final_generator,
            "content_type": content_type,  # Let Rust know the intended content type
            "headers": all_response_headers  # Intended HTTP headers for the overall response
        }

        result_payload = ToolBoxResult(
            data_to=interface,
            data=streaming_data,
            data_info="Streaming response" if content_type != "text/event-stream" else "SSE Event Stream",
            data_type="stream"  # Generic type for Rust to identify it needs to stream from 'generator'
        )

        return cls(error=error, info=info_obj, result=result_payload)

    @classmethod
    def sse(cls,
            stream_generator: Any,
            info: str = "OK",
            interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
            cleanup_func: Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None = None,
            # http_headers: Optional[dict] = None # If we want to allow overriding default SSE HTTP headers
            ):
        """
        Create a Server-Sent Events (SSE) streaming response Result.

        Args:
            stream_generator: A source yielding individual data items. This can be an
                              async generator, sync generator, iterable, or a single item.
                              Each item will be formatted as an SSE event.
            info: Optional help text for the Result.
            interface: Optional ToolBoxInterface to target.
            cleanup_func: Optional cleanup function to run when the stream ends or is cancelled.
            #http_headers: Optional dictionary of custom HTTP headers for the SSE response.

        Returns:
            A Result object configured for SSE streaming.
        """
        # Result.stream will handle calling SSEGenerator.create_sse_stream
        # and setting appropriate default headers for SSE when content_type is "text/event-stream".
        return cls.stream(
            stream_generator=stream_generator,
            content_type="text/event-stream",
            # headers=http_headers, # Pass if we add http_headers param
            info=info,
            interface=interface,
            cleanup_func=cleanup_func
        )

    @classmethod
    def default(cls, interface=ToolBoxInterfaces.native):
        """Placeholder Result: exec_code -1, empty help text, no payload."""
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=-1, help_text="")
        result = ToolBoxResult(data_to=interface)
        return cls(error=error, info=info, result=result)

    @classmethod
    def json(cls, data, info="OK", interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None):
        """Create a JSON response Result."""
        error = ToolBoxError.none
        # status_code, when given, takes precedence over exec_code.
        info_obj = ToolBoxInfo(exec_code=status_code or exec_code, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=data,
            data_info="JSON response",
            data_type="json"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def text(cls, text_data, content_type="text/plain",exec_code=None,status=200, info="OK", interface=ToolBoxInterfaces.remote, headers=None):
        """Create a text response Result with specific content type."""
        # Custom headers need the special_html payload shape, so delegate.
        if headers is not None:
            return cls.html(text_data, status= exec_code or status, info=info, headers=headers)
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=exec_code or status, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=text_data,
            data_info="Text response",
            data_type=content_type
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def binary(cls, data, content_type="application/octet-stream", download_name=None, info="OK",
               interface=ToolBoxInterfaces.remote):
        """Create a binary data response Result."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=0, help_text=info)

        # Create a dictionary with binary data and metadata
        binary_data = {
            "data": data,
            "content_type": content_type,
            "filename": download_name
        }

        result = ToolBoxResult(
            data_to=interface,
            data=binary_data,
            data_info=f"Binary response: {download_name}" if download_name else "Binary response",
            data_type="binary"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def file(cls, data, filename, content_type=None, info="OK", interface=ToolBoxInterfaces.remote):
        """Create a file download response Result.

        Args:
            data: File data as bytes or base64 string
            filename: Name of the file for download
            content_type: MIME type of the file (auto-detected if None)
            info: Response info text
            interface: Target interface

        Returns:
            Result object configured for file download
        """
        import base64
        import mimetypes

        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=200, help_text=info)

        # Auto-detect content type if not provided
        if content_type is None:
            content_type, _ = mimetypes.guess_type(filename)
            if content_type is None:
                content_type = "application/octet-stream"

        # Ensure data is base64 encoded string (as expected by Rust server)
        if isinstance(data, bytes):
            base64_data = base64.b64encode(data).decode('utf-8')
        elif isinstance(data, str):
            # Assume it's already base64 encoded
            base64_data = data
        else:
            raise ValueError("File data must be bytes or base64 string")

        result = ToolBoxResult(
            data_to=interface,
            data=base64_data,  # Rust expects base64 string for "file" type
            # NOTE(review): f-string below has no placeholder; `filename`
            # appears intended here — confirm.
            data_info=f"File download: (unknown)",
            data_type="file"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def redirect(cls, url, status_code=302, info="Redirect", interface=ToolBoxInterfaces.remote):
        """Create a redirect response."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=status_code, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=url,
            data_info="Redirect response",
            data_type="redirect"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def ok(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.native):
        """Success Result (exec_code 0) carrying arbitrary data."""
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=0, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def html(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.remote, data_type="html",status=200, headers=None, row=False):
        """HTML response Result; unless `row`, wraps content in the main-content
        div and prepends the app web context. Dict `headers` switches the
        payload to the special_html shape."""
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=status, help_text=info)
        from ...utils.system.getting_and_closing_app import get_app

        # NOTE(review): the literal below contains doubled quotes
        # ('"<div class="main-content""'), so it can never match a normal
        # wrapper div — confirm the intended marker. Closing tag is also
        # '<div>' rather than '</div>'.
        if not row and not '"<div class="main-content""' in data:
            data = f'<div class="main-content frosted-glass">{data}<div>'
        if not row and not get_app().web_context() in data:
            data = get_app().web_context() + data

        if isinstance(headers, dict):
            result = ToolBoxResult(data_to=interface, data={'html':data,'headers':headers}, data_info=data_info,
                                   data_type="special_html")
        else:
            result = ToolBoxResult(data_to=interface, data=data, data_info=data_info,
                                   data_type=data_type if data_type is not None else type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def future(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.future):
        """Result routed to the `future` interface; payload resolves later (see `aget`)."""
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=0, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type="future")
        return cls(error=error, info=info, result=result)

    @classmethod
    def custom_error(cls, data=None, data_info="", info="", exec_code=-1, interface=ToolBoxInterfaces.native):
        """Failure Result with ToolBoxError.custom_error (default exec_code -1)."""
        error = ToolBoxError.custom_error
        info = ToolBoxInfo(exec_code=exec_code, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def error(cls, data=None, data_info="", info="", exec_code=450, interface=ToolBoxInterfaces.remote):
        """Failure Result for remote callers (default exec_code 450).

        NOTE(review): also uses ToolBoxError.custom_error, same as
        `custom_error` — confirm this is intentional.
        """
        error = ToolBoxError.custom_error
        info = ToolBoxInfo(exec_code=exec_code, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def default_user_error(cls, info="", exec_code=-3, interface=ToolBoxInterfaces.native, data=None):
        """Failure Result flagged as a user/input error (default exec_code -3)."""
        error = ToolBoxError.input_error
        info = ToolBoxInfo(exec_code, info)
        result = ToolBoxResult(data_to=interface, data=data, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def default_internal_error(cls, info="", exec_code=-2, interface=ToolBoxInterfaces.native, data=None):
        """Failure Result flagged as an internal error (default exec_code -2)."""
        error = ToolBoxError.internal_error
        info = ToolBoxInfo(exec_code, info)
        result = ToolBoxResult(data_to=interface, data=data, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    def print(self, show=True, show_data=True, prifix=""):
        """Render a human-readable dump.

        Returns the text when ``show`` is False, otherwise prints it and
        returns self. Nested Result payloads are rendered recursively with
        an extended prefix. (``prifix`` spelling kept for API stability.)
        """
        data = '\n' + f"{((prifix + f'Data_{self.result.data_type}: ' + str(self.result.data) if self.result.data is not None else 'NO Data') if not isinstance(self.result.data, Result) else self.result.data.print(show=False, show_data=show_data, prifix=prifix + '-')) if show_data else 'Data: private'}"
        origin = '\n' + f"{prifix + 'Origin: ' + str(self.origin) if self.origin is not None else 'NO Origin'}"
        text = (f"Function Exec code: {self.info.exec_code}"
                f"\n{prifix}Info's:"
                f" {self.info.help_text} {'<|> ' + str(self.result.data_info) if self.result.data_info is not None else ''}"
                f"{origin}{(data[:100]+'...') if not data.endswith('NO Data') else ''}\n")
        if not show:
            return text
        print("\n======== Result ========\n" + text + "------- EndOfD -------")
        return self

    def log(self, show_data=True, prifix=""):
        """Write the `print` dump to the debug logger on one line (fluent)."""
        from toolboxv2 import get_logger
        get_logger().debug(self.print(show=False, show_data=show_data, prifix=prifix).replace("\n", " - "))
        return self

    def __str__(self):
        """Same text as `print(show=False)`."""
        return self.print(show=False, show_data=True)

    def get(self, key=None, default=None):
        """Fetch the payload (or `data[key]` for dict payloads).

        Delegates to a nested Result payload; returns `default` when the
        value is None or the key is missing.
        """
        data = self.result.data
        if isinstance(data, Result):
            return data.get(key=key, default=default)
        if key is not None and isinstance(data, dict):
            return data.get(key, default)
        return data if data is not None else default

    async def aget(self, key=None, default=None):
        """Async variant of `get`: awaits future/coroutine payloads first."""
        if asyncio.isfuture(self.result.data) or asyncio.iscoroutine(self.result.data) or (
            isinstance(self.result.data_to, Enum) and self.result.data_to.name == ToolBoxInterfaces.future.name):
            data = await self.result.data
        else:
            data = self.get(key=None, default=None)
        if isinstance(data, Result):
            return data.get(key=key, default=default)
        if key is not None and isinstance(data, dict):
            return data.get(key, default)
        return data if data is not None else default

    def lazy_return(self, _=0, data=None, **kwargs):
        """On success (exec_code 0) return data (or self); otherwise escalate.

        ``_`` selects the failure behavior: 'raise' (ValueError), 'logg'
        (error log), 'user' / 'intern' (wrap in the corresponding default
        error Result), anything else falls through to `custom_error`.
        Accepts the flag as an int index into ['raise','logg','user','intern'].
        """
        flags = ['raise', 'logg', 'user', 'intern']
        flag = flags[_] if isinstance(_, int) else _
        if self.info.exec_code == 0:
            return self if data is None else data if _test_is_result(data) else self.ok(data=data, **kwargs)
        if flag == 'raise':
            raise ValueError(self.print(show=False))
        if flag == 'logg':
            from .. import get_logger
            get_logger().error(self.print(show=False))

        if flag == 'user':
            return self if data is None else data if _test_is_result(data) else self.default_user_error(data=data,
                                                                                                        **kwargs)
        if flag == 'intern':
            return self if data is None else data if _test_is_result(data) else self.default_internal_error(data=data,
                                                                                                            **kwargs)

        return self if data is None else data if _test_is_result(data) else self.custom_error(data=data, **kwargs)

    @property
    def bg_task(self):
        """The background task attached via `task()`, or None."""
        return self._task
binary(data, content_type='application/octet-stream', download_name=None, info='OK', interface=ToolBoxInterfaces.remote) classmethod

Create a binary data response Result.

Source code in toolboxv2/utils/system/types.py
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
@classmethod
def binary(cls, data, content_type="application/octet-stream", download_name=None, info="OK",
           interface=ToolBoxInterfaces.remote):
    """Build a Result carrying raw binary data plus download metadata."""
    # Bundle the bytes with their metadata so the transport layer can set
    # Content-Type / Content-Disposition headers from the payload.
    payload = {
        "data": data,
        "content_type": content_type,
        "filename": download_name
    }

    description = f"Binary response: {download_name}" if download_name else "Binary response"
    wrapped = ToolBoxResult(
        data_to=interface,
        data=payload,
        data_info=description,
        data_type="binary"
    )

    return cls(
        error=ToolBoxError.none,
        info=ToolBoxInfo(exec_code=0, help_text=info),
        result=wrapped
    )
file(data, filename, content_type=None, info='OK', interface=ToolBoxInterfaces.remote) classmethod

Create a file download response Result.

Parameters:

Name Type Description Default
data

File data as bytes or base64 string

required
filename

Name of the file for download

required
content_type

MIME type of the file (auto-detected if None)

None
info

Response info text

'OK'
interface

Target interface

remote

Returns:

Type Description

Result object configured for file download

Source code in toolboxv2/utils/system/types.py
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
@classmethod
def file(cls, data, filename, content_type=None, info="OK", interface=ToolBoxInterfaces.remote):
    """Create a file download response Result.

    Args:
        data: File data as bytes or base64 string
        filename: Name of the file for download
        content_type: MIME type of the file (auto-detected if None)
        info: Response info text
        interface: Target interface

    Returns:
        Result object configured for file download

    Raises:
        ValueError: If `data` is neither bytes nor a (base64) string.
    """
    import base64
    import mimetypes

    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=200, help_text=info)

    # Auto-detect content type from the filename if not provided
    if content_type is None:
        content_type, _ = mimetypes.guess_type(filename)
        if content_type is None:
            content_type = "application/octet-stream"

    # Ensure data is base64 encoded string (as expected by Rust server)
    if isinstance(data, bytes):
        base64_data = base64.b64encode(data).decode('utf-8')
    elif isinstance(data, str):
        # Assume it's already base64 encoded
        base64_data = data
    else:
        raise ValueError("File data must be bytes or base64 string")

    result = ToolBoxResult(
        data_to=interface,
        data=base64_data,  # Rust expects base64 string for "file" type
        # Fix: the f-string previously contained no placeholder, dropping
        # the filename from the description.
        data_info=f"File download: {filename}",
        data_type="file"
    )

    return cls(error=error, info=info_obj, result=result)
json(data, info='OK', interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None) classmethod

Create a JSON response Result.

Source code in toolboxv2/utils/system/types.py
844
845
846
847
848
849
850
851
852
853
854
855
856
857
@classmethod
def json(cls, data, info="OK", interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None):
    """Build a Result whose payload the transport layer serializes as JSON."""
    # An explicit status_code wins over exec_code.
    code = status_code or exec_code

    payload = ToolBoxResult(
        data_to=interface,
        data=data,
        data_info="JSON response",
        data_type="json"
    )

    return cls(
        error=ToolBoxError.none,
        info=ToolBoxInfo(exec_code=code, help_text=info),
        result=payload
    )
redirect(url, status_code=302, info='Redirect', interface=ToolBoxInterfaces.remote) classmethod

Create a redirect response.

Source code in toolboxv2/utils/system/types.py
943
944
945
946
947
948
949
950
951
952
953
954
955
956
@classmethod
def redirect(cls, url, status_code=302, info="Redirect", interface=ToolBoxInterfaces.remote):
    """Build a Result instructing the client to redirect to `url`."""
    # The status code (default 302 Found) travels in the info block.
    payload = ToolBoxResult(
        data_to=interface,
        data=url,
        data_info="Redirect response",
        data_type="redirect"
    )

    return cls(
        error=ToolBoxError.none,
        info=ToolBoxInfo(exec_code=status_code, help_text=info),
        result=payload
    )
sse(stream_generator, info='OK', interface=ToolBoxInterfaces.remote, cleanup_func=None) classmethod

Create a Server-Sent Events (SSE) streaming response Result.

Parameters:

Name Type Description Default
stream_generator Any

A source yielding individual data items. This can be an async generator, sync generator, iterable, or a single item. Each item will be formatted as an SSE event.

required
info str

Optional help text for the Result.

'OK'
interface ToolBoxInterfaces

Optional ToolBoxInterface to target.

remote
cleanup_func Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None

Optional cleanup function to run when the stream ends or is cancelled.

None
#http_headers

Optional dictionary of custom HTTP headers for the SSE response.

required

Returns:

Type Description

A Result object configured for SSE streaming.

Source code in toolboxv2/utils/system/types.py
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
@classmethod
def sse(cls,
        stream_generator: Any,
        info: str = "OK",
        interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
        cleanup_func: Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None = None,
        # http_headers: Optional[dict] = None # If we want to allow overriding default SSE HTTP headers
        ):
    """Build a Server-Sent Events (SSE) streaming response Result.

    Thin convenience wrapper around :meth:`stream` that pins the content
    type to ``text/event-stream``; `stream` then wraps the source with
    SSEGenerator and attaches the standard SSE response headers.

    Args:
        stream_generator: Source of events — an async generator, sync
            generator, iterable, or a single item; each item becomes one
            SSE event.
        info: Help text stored on the Result.
        interface: Target ToolBoxInterface.
        cleanup_func: Optional hook run when the stream ends or is
            cancelled.

    Returns:
        A Result object configured for SSE streaming.
    """
    return cls.stream(
        stream_generator=stream_generator,
        content_type="text/event-stream",
        # headers=http_headers, # Pass if we add http_headers param
        info=info,
        interface=interface,
        cleanup_func=cleanup_func
    )
stream(stream_generator, content_type='text/event-stream', headers=None, info='OK', interface=ToolBoxInterfaces.remote, cleanup_func=None) classmethod

Create a streaming response Result. Handles SSE and other stream types.

Parameters:

Name Type Description Default
stream_generator Any

Any stream source (async generator, sync generator, iterable, or single item).

required
content_type str

Content-Type header (default: text/event-stream for SSE).

'text/event-stream'
headers dict | None

Additional HTTP headers for the response.

None
info str

Help text for the result.

'OK'
interface ToolBoxInterfaces

Interface to send data to.

remote
cleanup_func Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None

Optional function for cleanup.

None

Returns:

Type Description

A Result object configured for streaming.

Source code in toolboxv2/utils/system/types.py
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
@classmethod
def stream(cls,
           stream_generator: Any,  # Renamed from source for clarity
           content_type: str = "text/event-stream",  # Default to SSE
           headers: dict | None = None,
           info: str = "OK",
           interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
           cleanup_func: Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None = None):
    """
    Create a streaming response Result. Handles SSE and other stream types.

    Args:
        stream_generator: Any stream source (async generator, sync generator, iterable, or single item).
        content_type: Content-Type header (default: text/event-stream for SSE).
        headers: Additional HTTP headers for the response.
        info: Help text for the result.
        interface: Interface to send data to.
        cleanup_func: Optional function for cleanup.

    Returns:
        A Result object configured for streaming.
    """
    err = ToolBoxError.none
    meta = ToolBoxInfo(exec_code=0, help_text=info)

    is_sse = content_type == "text/event-stream"
    if is_sse:
        # SSE sources are always funneled through SSEGenerator, which copes
        # with async/sync generators, iterables and single items internally.
        generator = SSEGenerator.create_sse_stream(source=stream_generator, cleanup_func=cleanup_func)

        # Standard SSE headers stored on the Result; the consumer (Rust side)
        # decides how to apply them to the actual HTTP response.
        response_headers = {
            "Cache-Control": "no-cache",  # SSE specific
            "Connection": "keep-alive",  # SSE specific
            "X-Accel-Buffering": "no",  # Useful for proxies with SSE
            # Content-Type travels separately in the streaming payload below.
        }
        if headers:
            response_headers.update(headers)
    else:
        # Non-SSE: normalize the source into an async generator ourselves.
        if inspect.isgenerator(stream_generator) or \
                (not isinstance(stream_generator, str) and hasattr(stream_generator, '__iter__')):
            # Sync generator or iterable (strings excluded) -> async wrapper.
            generator = SSEGenerator.wrap_sync_generator(stream_generator)
        elif inspect.isasyncgen(stream_generator):
            generator = stream_generator
        else:
            # Single item (including strings): yield it exactly once.
            async def _single_item_gen():
                yield stream_generator

            generator = _single_item_gen()
        response_headers = headers if headers else {}

    # Payload stored on the Result object; the consumer streams from 'generator'.
    streaming_payload = {
        "type": "stream",  # Indicator for Rust side
        "generator": generator,
        "content_type": content_type,  # Intended content type of the stream
        "headers": response_headers  # Intended HTTP headers for the overall response
    }

    result_payload = ToolBoxResult(
        data_to=interface,
        data=streaming_payload,
        data_info="SSE Event Stream" if is_sse else "Streaming response",
        data_type="stream"  # Generic type telling the consumer to stream from 'generator'
    )

    return cls(error=err, info=meta, result=result_payload)
text(text_data, content_type='text/plain', exec_code=None, status=200, info='OK', interface=ToolBoxInterfaces.remote, headers=None) classmethod

Create a text response Result with specific content type.

Source code in toolboxv2/utils/system/types.py
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
@classmethod
def text(cls, text_data, content_type="text/plain",exec_code=None,status=200, info="OK", interface=ToolBoxInterfaces.remote, headers=None):
    """Create a text response Result with specific content type.

    Delegates to ``cls.html`` when custom *headers* are supplied, since that
    path carries the headers through to the response.
    """
    if headers is not None:
        return cls.html(text_data, status= exec_code or status, info=info, headers=headers)

    payload = ToolBoxResult(
        data_to=interface,
        data=text_data,
        data_info="Text response",
        data_type=content_type
    )
    return cls(
        error=ToolBoxError.none,
        info=ToolBoxInfo(exec_code=exec_code or status, help_text=info),
        result=payload,
    )
SSEGenerator

Production-ready SSE generator that converts any data source to properly formatted Server-Sent Events compatible with browsers.

Source code in toolboxv2/utils/system/types.py
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
class SSEGenerator:
    """
    Production-ready SSE generator that converts any data source to
    properly formatted Server-Sent Events compatible with browsers.

    Framing: every stream produced by `create_sse_stream` is bracketed by a
    `stream_start` event and (emitted from a `finally` block) a `stream_end`
    event, so clients see both markers even on error or cancellation.
    """

    @staticmethod
    def format_sse_event(data: Any) -> str:
        """Format any data as a proper SSE event message.

        Dispatch rules, in order:
          * strings already shaped like SSE (`data:`/`event:` prefix plus a
            blank-line terminator) pass through unchanged;
          * non-UTF-8 bytes become a base64-encoded `binary` event;
          * dicts/JSON strings carrying an `event` key expand into
            `event:`/`id:`/`data:` fields;
          * everything else is emitted as plain `data:` lines
            (JSON-encoded first when not a string).
        """
        # Already formatted as SSE
        if isinstance(data, str) and (data.startswith('data:') or data.startswith('event:')) and '\n\n' in data:
            return data

        # Handle bytes (binary data)
        if isinstance(data, bytes):
            try:
                # Try to decode as UTF-8 first
                decoded_data_str = data.decode('utf-8')
                # If decoding works, treat it as a string for further processing
                # This allows binary data that is valid UTF-8 JSON to be processed as JSON.
                data = decoded_data_str
            except UnicodeDecodeError:
                # Binary data that is not UTF-8, encode as base64
                b64_data = base64.b64encode(data).decode('utf-8')
                return f"event: binary\ndata: {b64_data}\n\n"

        # Convert non-string objects (that are not already bytes) to JSON string
        # If data was bytes and successfully decoded to UTF-8 string, it will be processed here.
        # NOTE(review): this flag is never read below in this block.
        original_data_type_was_complex = False
        if not isinstance(data, str):
            original_data_type_was_complex = True
            try:
                data_str = json.dumps(data)
            except Exception:
                data_str = str(data)  # Fallback to string representation
        else:
            data_str = data  # data is already a string

        # Handle JSON data with special event formatting
        # data_str now holds the string representation (either original string or JSON string)
        if data_str.strip().startswith('{'):
            try:
                json_data = json.loads(data_str)
                if isinstance(json_data, dict) and 'event' in json_data:
                    event_type = json_data['event']
                    event_id = json_data.get('id', None)  # Use None to distinguish from empty string

                    # Determine the actual data payload for the SSE 'data:' field
                    # If 'data' key exists in json_data, use its content.
                    # Otherwise, use the original data_str (which is the JSON of json_data).
                    if 'data' in json_data:
                        payload_content = json_data['data']
                        # If payload_content is complex, re-serialize it to JSON string
                        if isinstance(payload_content, dict | list):
                            sse_data_field = json.dumps(payload_content)
                        else:  # Simple type (string, number, bool)
                            sse_data_field = str(payload_content)
                    else:
                        # If original data was complex (e.g. dict) and became json_data,
                        # and no 'data' key in it, then use the full json_data as payload.
                        # If original data was a simple string that happened to be JSON parsable
                        # but without 'event' key, it would have been handled by "Regular JSON without event"
                        # or "Plain text" later.
                        # This path implies original data was a dict with 'event' key.
                        sse_data_field = data_str

                    sse_lines = []
                    if event_type:  # Should always be true here
                        sse_lines.append(f"event: {event_type}")
                    if event_id is not None:  # Check for None, allow empty string id
                        sse_lines.append(f"id: {event_id}")

                    # Handle multi-line data for the data field
                    for line in sse_data_field.splitlines():
                        sse_lines.append(f"data: {line}")

                    return "\n".join(sse_lines) + "\n\n"
                else:
                    # Regular JSON without special 'event' key
                    sse_lines = []
                    for line in data_str.splitlines():
                        sse_lines.append(f"data: {line}")
                    return "\n".join(sse_lines) + "\n\n"
            except json.JSONDecodeError:
                # Not valid JSON, treat as plain text
                sse_lines = []
                for line in data_str.splitlines():
                    sse_lines.append(f"data: {line}")
                return "\n".join(sse_lines) + "\n\n"
        else:
            # Plain text
            sse_lines = []
            for line in data_str.splitlines():
                sse_lines.append(f"data: {line}")
            return "\n".join(sse_lines) + "\n\n"

    @classmethod
    async def wrap_sync_generator(cls, generator):
        """Convert a synchronous generator to an async generator."""
        for item in generator:
            yield item
            # Allow other tasks to run
            await asyncio.sleep(0)

    @classmethod
    async def create_sse_stream(
        cls,
        source: Any,  # Changed from positional arg to keyword for clarity in Result.stream
        cleanup_func: Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None = None
    ) -> AsyncGenerator[str, None]:
        """
        Convert any source to a properly formatted SSE stream.

        Args:
            source: Can be async generator, sync generator, iterable, or a single item.
            cleanup_func: Optional function to call when the stream ends or is cancelled.
                          Can be a synchronous function, async function, or async generator.

        Yields:
            Properly formatted SSE messages (strings).
        """
        # Send stream start event
        # This structure ensures data field contains {"id":"0"}
        yield cls.format_sse_event({"event": "stream_start", "data": {"id": "0"}})

        try:
            # Handle different types of sources
            if inspect.isasyncgen(source):
                # Source is already an async generator
                async for item in source:
                    yield cls.format_sse_event(item)
            elif inspect.isgenerator(source) or (not isinstance(source, str) and hasattr(source, '__iter__')):
                # Source is a sync generator or iterable (but not a string)
                # Strings are iterable but should be treated as single items unless explicitly made a generator
                async for item in cls.wrap_sync_generator(source):
                    yield cls.format_sse_event(item)
            else:
                # Single item (including strings)
                yield cls.format_sse_event(source)
        except asyncio.CancelledError:
            # Client disconnected
            yield cls.format_sse_event({"event": "cancelled", "data": {"id": "cancelled"}})
            raise
        except Exception as e:
            # Error in stream
            error_info = {
                "event": "error",
                "data": {  # Ensure payload is under 'data' key for the new format_sse_event logic
                    "message": str(e),
                    "traceback": traceback.format_exc()
                }
            }
            yield cls.format_sse_event(error_info)
        finally:
            # Always send end event
            yield cls.format_sse_event({"event": "stream_end", "data": {"id": "final"}})

            # Execute cleanup function if provided
            if cleanup_func:
                try:
                    if inspect.iscoroutinefunction(cleanup_func):  # Check if it's an async def function
                        await cleanup_func()
                    elif inspect.isasyncgenfunction(cleanup_func) or inspect.isasyncgen(
                        cleanup_func):  # Check if it's an async def generator function or already an async generator
                        # If it's a function, call it to get the generator
                        gen_to_exhaust = cleanup_func() if inspect.isasyncgenfunction(cleanup_func) else cleanup_func
                        async for _ in gen_to_exhaust:
                            pass  # Exhaust the generator to ensure cleanup completes
                    else:
                        # Synchronous function
                        cleanup_func()
                except Exception as e:
                    # Log cleanup errors but don't propagate them to client
                    error_info_cleanup = {
                        "event": "cleanup_error",
                        "data": {  # Ensure payload is under 'data' key
                            "message": str(e),
                            "traceback": traceback.format_exc()
                        }
                    }
                    # We can't yield here as the stream is already closing/closed.
                    # Instead, log the error.
                    # In a real app, use a proper logger.
                    print(f"SSE cleanup error: {cls.format_sse_event(error_info_cleanup)}", flush=True)
create_sse_stream(source, cleanup_func=None) async classmethod

Convert any source to a properly formatted SSE stream.

Parameters:

Name Type Description Default
source Any

Can be async generator, sync generator, iterable, or a single item.

required
cleanup_func Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None

Optional function to call when the stream ends or is cancelled. Can be a synchronous function, async function, or async generator.

None

Yields:

Type Description
AsyncGenerator[str, None]

Properly formatted SSE messages (strings).

Source code in toolboxv2/utils/system/types.py
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
@classmethod
async def create_sse_stream(
    cls,
    source: Any,  # Changed from positional arg to keyword for clarity in Result.stream
    cleanup_func: Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None = None
) -> AsyncGenerator[str, None]:
    """
    Convert any source to a properly formatted SSE stream.

    The payload is bracketed by a ``stream_start`` event up front and a
    ``stream_end`` event emitted from the ``finally`` block, so consumers
    always receive both markers even on error or cancellation.

    Args:
        source: Can be async generator, sync generator, iterable, or a single item.
        cleanup_func: Optional function to call when the stream ends or is cancelled.
                      Can be a synchronous function, async function, or async generator.

    Yields:
        Properly formatted SSE messages (strings).
    """
    # Send stream start event
    # This structure ensures data field contains {"id":"0"}
    yield cls.format_sse_event({"event": "stream_start", "data": {"id": "0"}})

    try:
        # Handle different types of sources
        if inspect.isasyncgen(source):
            # Source is already an async generator
            async for item in source:
                yield cls.format_sse_event(item)
        elif inspect.isgenerator(source) or (not isinstance(source, str) and hasattr(source, '__iter__')):
            # Source is a sync generator or iterable (but not a string)
            # Strings are iterable but should be treated as single items unless explicitly made a generator
            async for item in cls.wrap_sync_generator(source):
                yield cls.format_sse_event(item)
        else:
            # Single item (including strings)
            yield cls.format_sse_event(source)
    except asyncio.CancelledError:
        # Client disconnected: tell the client, then re-raise so the
        # cancellation propagates to the server framework.
        yield cls.format_sse_event({"event": "cancelled", "data": {"id": "cancelled"}})
        raise
    except Exception as e:
        # Error in stream
        error_info = {
            "event": "error",
            "data": {  # Ensure payload is under 'data' key for the new format_sse_event logic
                "message": str(e),
                "traceback": traceback.format_exc()
            }
        }
        yield cls.format_sse_event(error_info)
    finally:
        # Always send end event
        yield cls.format_sse_event({"event": "stream_end", "data": {"id": "final"}})

        # Execute cleanup function if provided
        if cleanup_func:
            try:
                if inspect.iscoroutinefunction(cleanup_func):  # Check if it's an async def function
                    await cleanup_func()
                elif inspect.isasyncgenfunction(cleanup_func) or inspect.isasyncgen(
                    cleanup_func):  # Check if it's an async def generator function or already an async generator
                    # If it's a function, call it to get the generator
                    gen_to_exhaust = cleanup_func() if inspect.isasyncgenfunction(cleanup_func) else cleanup_func
                    async for _ in gen_to_exhaust:
                        pass  # Exhaust the generator to ensure cleanup completes
                else:
                    # Synchronous function
                    cleanup_func()
            except Exception as e:
                # Log cleanup errors but don't propagate them to client
                error_info_cleanup = {
                    "event": "cleanup_error",
                    "data": {  # Ensure payload is under 'data' key
                        "message": str(e),
                        "traceback": traceback.format_exc()
                    }
                }
                # We can't yield here as the stream is already closing/closed.
                # Instead, log the error.
                # In a real app, use a proper logger.
                print(f"SSE cleanup error: {cls.format_sse_event(error_info_cleanup)}", flush=True)
format_sse_event(data) staticmethod

Format any data as a proper SSE event message.

Source code in toolboxv2/utils/system/types.py
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
@staticmethod
def format_sse_event(data: Any) -> str:
    """Format any data as a proper SSE event message."""
    # Already formatted as SSE
    if isinstance(data, str) and (data.startswith('data:') or data.startswith('event:')) and '\n\n' in data:
        return data

    # Handle bytes (binary data)
    if isinstance(data, bytes):
        try:
            # Try to decode as UTF-8 first
            decoded_data_str = data.decode('utf-8')
            # If decoding works, treat it as a string for further processing
            # This allows binary data that is valid UTF-8 JSON to be processed as JSON.
            data = decoded_data_str
        except UnicodeDecodeError:
            # Binary data that is not UTF-8, encode as base64
            b64_data = base64.b64encode(data).decode('utf-8')
            return f"event: binary\ndata: {b64_data}\n\n"

    # Convert non-string objects (that are not already bytes) to JSON string
    # If data was bytes and successfully decoded to UTF-8 string, it will be processed here.
    original_data_type_was_complex = False
    if not isinstance(data, str):
        original_data_type_was_complex = True
        try:
            data_str = json.dumps(data)
        except Exception:
            data_str = str(data)  # Fallback to string representation
    else:
        data_str = data  # data is already a string

    # Handle JSON data with special event formatting
    # data_str now holds the string representation (either original string or JSON string)
    if data_str.strip().startswith('{'):
        try:
            json_data = json.loads(data_str)
            if isinstance(json_data, dict) and 'event' in json_data:
                event_type = json_data['event']
                event_id = json_data.get('id', None)  # Use None to distinguish from empty string

                # Determine the actual data payload for the SSE 'data:' field
                # If 'data' key exists in json_data, use its content.
                # Otherwise, use the original data_str (which is the JSON of json_data).
                if 'data' in json_data:
                    payload_content = json_data['data']
                    # If payload_content is complex, re-serialize it to JSON string
                    if isinstance(payload_content, dict | list):
                        sse_data_field = json.dumps(payload_content)
                    else:  # Simple type (string, number, bool)
                        sse_data_field = str(payload_content)
                else:
                    # If original data was complex (e.g. dict) and became json_data,
                    # and no 'data' key in it, then use the full json_data as payload.
                    # If original data was a simple string that happened to be JSON parsable
                    # but without 'event' key, it would have been handled by "Regular JSON without event"
                    # or "Plain text" later.
                    # This path implies original data was a dict with 'event' key.
                    sse_data_field = data_str

                sse_lines = []
                if event_type:  # Should always be true here
                    sse_lines.append(f"event: {event_type}")
                if event_id is not None:  # Check for None, allow empty string id
                    sse_lines.append(f"id: {event_id}")

                # Handle multi-line data for the data field
                for line in sse_data_field.splitlines():
                    sse_lines.append(f"data: {line}")

                return "\n".join(sse_lines) + "\n\n"
            else:
                # Regular JSON without special 'event' key
                sse_lines = []
                for line in data_str.splitlines():
                    sse_lines.append(f"data: {line}")
                return "\n".join(sse_lines) + "\n\n"
        except json.JSONDecodeError:
            # Not valid JSON, treat as plain text
            sse_lines = []
            for line in data_str.splitlines():
                sse_lines.append(f"data: {line}")
            return "\n".join(sse_lines) + "\n\n"
    else:
        # Plain text
        sse_lines = []
        for line in data_str.splitlines():
            sse_lines.append(f"data: {line}")
        return "\n".join(sse_lines) + "\n\n"
wrap_sync_generator(generator) async classmethod

Convert a synchronous generator to an async generator.

Source code in toolboxv2/utils/system/types.py
2098
2099
2100
2101
2102
2103
2104
@classmethod
async def wrap_sync_generator(cls, generator):
    """Bridge a synchronous iterable into an async generator."""
    for element in generator:
        yield element
        # Cooperative yield so other event-loop tasks get a turn per item.
        await asyncio.sleep(0)
Session dataclass

Class representing a session.

Source code in toolboxv2/utils/system/types.py
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
@dataclass
class Session:
    """Class representing a session."""
    SiID: str
    level: str
    spec: str
    user_name: str
    # Allow for additional fields
    extra_data: dict[str, Any] = field(default_factory=dict)

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> 'Session':
        """Create a Session instance from a dictionary with default values."""
        known_fields = {
            'SiID': data.get('SiID', '#0'),
            'level': data.get('level', -1),
            'spec': data.get('spec', 'app'),
            'user_name': data.get('user_name', 'anonymous'),
        }

        extra_data = {k: v for k, v in data.items() if k not in known_fields}
        return cls(**known_fields, extra_data=extra_data)

    def to_dict(self) -> dict[str, Any]:
        """Convert the Session object back to a dictionary."""
        result = {
            'SiID': self.SiID,
            'level': self.level,
            'spec': self.spec,
            'user_name': self.user_name,
        }

        # Add extra data
        result.update(self.extra_data)

        return result

    @property
    def valid(self):
        return int(self.level) > 0
from_dict(data) classmethod

Create a Session instance from a dictionary with default values.

Source code in toolboxv2/utils/system/types.py
269
270
271
272
273
274
275
276
277
278
279
280
@classmethod
def from_dict(cls, data: dict[str, Any]) -> 'Session':
    """Build a Session from *data*, filling missing fields with defaults.

    Unknown keys are preserved round-trip in ``extra_data``.
    """
    defaults = {'SiID': '#0', 'level': -1, 'spec': 'app', 'user_name': 'anonymous'}
    known_fields = {name: data.get(name, fallback) for name, fallback in defaults.items()}
    leftover = {key: value for key, value in data.items() if key not in known_fields}
    return cls(**known_fields, extra_data=leftover)
to_dict()

Convert the Session object back to a dictionary.

Source code in toolboxv2/utils/system/types.py
282
283
284
285
286
287
288
289
290
291
292
293
294
def to_dict(self) -> dict[str, Any]:
    """Serialize the session back to a plain dictionary.

    The four known fields come first; extra_data entries are merged on top.
    """
    return {
        'SiID': self.SiID,
        'level': self.level,
        'spec': self.spec,
        'user_name': self.user_name,
        **self.extra_data,
    }
parse_request_data(data)

Parse the incoming request data into a strongly typed structure.

Source code in toolboxv2/utils/system/types.py
381
382
383
def parse_request_data(data: dict[str, Any]) -> RequestData:
    """Parse the incoming request data into a strongly typed structure.

    Thin wrapper around ``RequestData.from_dict``: *data* is the raw request
    dict and the return value is the corresponding ``RequestData`` instance.
    """
    return RequestData.from_dict(data)

toolbox

Main module.

App
Source code in toolboxv2/utils/toolbox.py
  44
  45
  46
  47
  48
  49
  50
  51
  52
  53
  54
  55
  56
  57
  58
  59
  60
  61
  62
  63
  64
  65
  66
  67
  68
  69
  70
  71
  72
  73
  74
  75
  76
  77
  78
  79
  80
  81
  82
  83
  84
  85
  86
  87
  88
  89
  90
  91
  92
  93
  94
  95
  96
  97
  98
  99
 100
 101
 102
 103
 104
 105
 106
 107
 108
 109
 110
 111
 112
 113
 114
 115
 116
 117
 118
 119
 120
 121
 122
 123
 124
 125
 126
 127
 128
 129
 130
 131
 132
 133
 134
 135
 136
 137
 138
 139
 140
 141
 142
 143
 144
 145
 146
 147
 148
 149
 150
 151
 152
 153
 154
 155
 156
 157
 158
 159
 160
 161
 162
 163
 164
 165
 166
 167
 168
 169
 170
 171
 172
 173
 174
 175
 176
 177
 178
 179
 180
 181
 182
 183
 184
 185
 186
 187
 188
 189
 190
 191
 192
 193
 194
 195
 196
 197
 198
 199
 200
 201
 202
 203
 204
 205
 206
 207
 208
 209
 210
 211
 212
 213
 214
 215
 216
 217
 218
 219
 220
 221
 222
 223
 224
 225
 226
 227
 228
 229
 230
 231
 232
 233
 234
 235
 236
 237
 238
 239
 240
 241
 242
 243
 244
 245
 246
 247
 248
 249
 250
 251
 252
 253
 254
 255
 256
 257
 258
 259
 260
 261
 262
 263
 264
 265
 266
 267
 268
 269
 270
 271
 272
 273
 274
 275
 276
 277
 278
 279
 280
 281
 282
 283
 284
 285
 286
 287
 288
 289
 290
 291
 292
 293
 294
 295
 296
 297
 298
 299
 300
 301
 302
 303
 304
 305
 306
 307
 308
 309
 310
 311
 312
 313
 314
 315
 316
 317
 318
 319
 320
 321
 322
 323
 324
 325
 326
 327
 328
 329
 330
 331
 332
 333
 334
 335
 336
 337
 338
 339
 340
 341
 342
 343
 344
 345
 346
 347
 348
 349
 350
 351
 352
 353
 354
 355
 356
 357
 358
 359
 360
 361
 362
 363
 364
 365
 366
 367
 368
 369
 370
 371
 372
 373
 374
 375
 376
 377
 378
 379
 380
 381
 382
 383
 384
 385
 386
 387
 388
 389
 390
 391
 392
 393
 394
 395
 396
 397
 398
 399
 400
 401
 402
 403
 404
 405
 406
 407
 408
 409
 410
 411
 412
 413
 414
 415
 416
 417
 418
 419
 420
 421
 422
 423
 424
 425
 426
 427
 428
 429
 430
 431
 432
 433
 434
 435
 436
 437
 438
 439
 440
 441
 442
 443
 444
 445
 446
 447
 448
 449
 450
 451
 452
 453
 454
 455
 456
 457
 458
 459
 460
 461
 462
 463
 464
 465
 466
 467
 468
 469
 470
 471
 472
 473
 474
 475
 476
 477
 478
 479
 480
 481
 482
 483
 484
 485
 486
 487
 488
 489
 490
 491
 492
 493
 494
 495
 496
 497
 498
 499
 500
 501
 502
 503
 504
 505
 506
 507
 508
 509
 510
 511
 512
 513
 514
 515
 516
 517
 518
 519
 520
 521
 522
 523
 524
 525
 526
 527
 528
 529
 530
 531
 532
 533
 534
 535
 536
 537
 538
 539
 540
 541
 542
 543
 544
 545
 546
 547
 548
 549
 550
 551
 552
 553
 554
 555
 556
 557
 558
 559
 560
 561
 562
 563
 564
 565
 566
 567
 568
 569
 570
 571
 572
 573
 574
 575
 576
 577
 578
 579
 580
 581
 582
 583
 584
 585
 586
 587
 588
 589
 590
 591
 592
 593
 594
 595
 596
 597
 598
 599
 600
 601
 602
 603
 604
 605
 606
 607
 608
 609
 610
 611
 612
 613
 614
 615
 616
 617
 618
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
class App(AppType, metaclass=Singleton):

    def __init__(self, prefix: str = "", args=AppArgs().default()):
        """Initialize the application singleton instance.

        Resolves platform-specific data/config/info directories, persists or
        restores the instance prefix, configures logging, loads the config
        file handler, and connects to the blob-storage cluster.

        Args:
            prefix: Instance prefix (e.g. "test", "live", "my-app-debug").
                When empty, the last used prefix is restored from
                `.data/last-app-prefix.txt`; otherwise it is persisted there.
            args: Parsed application arguments (AppArgs-like).
        """
        super().__init__(prefix, args)
        # BUG FIX: args must be stored *before* the provider:: default below,
        # which reads self.args_sto.port; previously args_sto was assigned
        # only near the end of __init__, after that read.
        self.args_sto = args
        self._web_context = None
        t0 = time.perf_counter()
        abspath = os.path.abspath(__file__)
        self.system_flag = system()  # Linux: Linux Mac: Darwin Windows: Windows

        # Per-user application data root (None on unsupported platforms; a
        # fallback to data_dir is applied further down).
        self.appdata = os.getenv('APPDATA') if os.name == 'nt' else os.getenv('XDG_CONFIG_HOME') or os.path.expanduser(
                '~/.config') if os.name == 'posix' else None

        if self.system_flag == "Darwin" or self.system_flag == "Linux":
            dir_name = os.path.dirname(abspath).replace("/utils", "")
        else:
            dir_name = os.path.dirname(abspath).replace("\\utils", "")

        self.start_dir = str(dir_name)

        self.bg_tasks = []

        # NOTE(review): backslash separators are used for all platforms here
        # and below; on POSIX these become literal characters in the path.
        # Kept as-is because existing deployments may depend on these exact
        # directory names — confirm before normalizing to os.path.join.
        lapp = dir_name + '\\.data\\'

        if not prefix:
            # No prefix given: restore the last one used, creating the
            # marker file on first run.
            if not os.path.exists(f"{lapp}last-app-prefix.txt"):
                os.makedirs(lapp, exist_ok=True)
                open(f"{lapp}last-app-prefix.txt", "a").close()
            with open(f"{lapp}last-app-prefix.txt") as prefix_file:
                cont = prefix_file.read()
                if cont:
                    prefix = cont.rstrip()
        else:
            # Explicit prefix: persist it for the next prefix-less start.
            if not os.path.exists(f"{lapp}last-app-prefix.txt"):
                os.makedirs(lapp, exist_ok=True)
                open(f"{lapp}last-app-prefix.txt", "a").close()
            with open(f"{lapp}last-app-prefix.txt", "w") as prefix_file:
                prefix_file.write(prefix)

        self.prefix = prefix

        node_ = node()

        if 'localhost' in node_ and (host := os.getenv('HOSTNAME', 'localhost')) != 'localhost':
            node_ = node_.replace('localhost', host)
        self.id = prefix + '-' + node_
        self.globals = {
            "root": {**globals()},
        }
        self.locals = {
            "user": {'app': self, **locals()},
        }

        identification = self.id
        collective_identification = self.id
        if "test" in prefix:
            # Test instances write into a shared "test" sandbox under a
            # normalized start dir.
            if self.system_flag == "Darwin" or self.system_flag == "Linux":
                start_dir = self.start_dir.replace("ToolBoxV2/toolboxv2", "toolboxv2")
            else:
                start_dir = self.start_dir.replace("ToolBoxV2\\toolboxv2", "toolboxv2")
            self.data_dir = start_dir + '\\.data\\' + "test"
            self.config_dir = start_dir + '\\.config\\' + "test"
            self.info_dir = start_dir + '\\.info\\' + "test"
        elif identification.startswith('collective-'):
            # Collective instances share directories keyed by the collective
            # name (second dash-separated segment).
            collective_identification = identification.split('-')[1]
            self.data_dir = self.start_dir + '\\.data\\' + collective_identification
            self.config_dir = self.start_dir + '\\.config\\' + collective_identification
            self.info_dir = self.start_dir + '\\.info\\' + collective_identification
            self.id = collective_identification
        else:
            self.data_dir = self.start_dir + '\\.data\\' + identification
            self.config_dir = self.start_dir + '\\.config\\' + identification
            self.info_dir = self.start_dir + '\\.info\\' + identification

        if self.appdata is None:
            self.appdata = self.data_dir
        else:
            self.appdata += "/ToolBoxV2"

        if not os.path.exists(self.appdata):
            os.makedirs(self.appdata, exist_ok=True)
        if not os.path.exists(self.data_dir):
            os.makedirs(self.data_dir, exist_ok=True)
        if not os.path.exists(self.config_dir):
            os.makedirs(self.config_dir, exist_ok=True)
        if not os.path.exists(self.info_dir):
            os.makedirs(self.info_dir, exist_ok=True)

        print(f"Starting ToolBox as {prefix} from :", Style.Bold(Style.CYAN(f"{os.getcwd()}")))

        logger_info_str, self.logger, self.logging_filename = self.set_logger(args.debug)

        print("Logger " + logger_info_str)
        print("================================")
        self.logger.info("Logger initialized")
        get_logger().info(Style.GREEN("Starting Application instance"))
        if args.init and args.init is not None and self.start_dir not in sys.path:
            sys.path.append(self.start_dir)

        __version__ = get_version_from_pyproject()
        self.version = __version__

        # Obfuscated, fixed-width keys for the FileHandler config store.
        self.keys = {
            "MACRO": "macro~~~~:",
            "MACRO_C": "m_color~~:",
            "HELPER": "helper~~~:",
            "debug": "debug~~~~:",
            "id": "name-spa~:",
            "st-load": "mute~load:",
            "comm-his": "comm-his~:",
            "develop-mode": "dev~mode~:",
            "provider::": "provider::",
        }

        defaults = {
            "MACRO": ['Exit'],
            "MACRO_C": {},
            "HELPER": {},
            "debug": args.debug,
            "id": self.id,
            "st-load": False,
            "comm-his": [[]],
            "develop-mode": False,
        }
        self.config_fh = FileHandler(collective_identification + ".config", keys=self.keys, defaults=defaults)
        self.config_fh.load_file_handler()
        self._debug = args.debug
        self.flows = {}
        self.dev_modi = self.config_fh.get_file_handler(self.keys["develop-mode"])
        if self.config_fh.get_file_handler("provider::") is None:
            # Default provider: local port when running on localhost,
            # otherwise the hosted endpoint.
            self.config_fh.add_to_save_file_handler("provider::", "http://localhost:" + str(
                self.args_sto.port) if os.environ.get("HOSTNAME","localhost") == "localhost" else "https://simplecore.app")
        self.functions = {}
        self.modules = {}

        self.interface_type = ToolBoxInterfaces.native
        self.PREFIX = Style.CYAN(f"~{node()}@>")
        self.alive = True
        self.called_exit = False, time.time()

        self.print(f"Infos:\n  {'Name':<8} -> {node()}\n  {'ID':<8} -> {self.id}\n  {'Version':<8} -> {self.version}\n")

        self.logger.info(
            Style.GREEN(
                f"Finish init up in {time.perf_counter() - t0:.2f}s"
            )
        )

        self.loop = None

        from .system.session import Session
        self.session: Session = Session(self.get_username())
        # "db" CLI subcommand manages the cluster itself; skip auto-start.
        if len(sys.argv) > 2 and sys.argv[1] == "db":
            return
        from .system.db_cli_manager import ClusterManager, get_executable_path
        self.cluster_manager = ClusterManager()
        online_list, server_list = self.cluster_manager.status_all(silent=True)
        if not server_list:
            self.cluster_manager.start_all(get_executable_path(), self.version)
            _, server_list = self.cluster_manager.status_all()
        from .extras.blobs import BlobStorage
        self.root_blob_storage = BlobStorage(servers=server_list, storage_directory=self.data_dir+ '\\blob_cache\\')
        # self._start_event_loop()

    def _start_event_loop(self):
        """Spin up a dedicated asyncio event loop on a daemon thread (idempotent)."""
        if self.loop is not None:
            return
        fresh_loop = asyncio.new_event_loop()
        worker = threading.Thread(target=fresh_loop.run_forever, daemon=True)
        self.loop = fresh_loop
        self.loop_thread = worker
        worker.start()

    def get_username(self, get_input=False, default="loot") -> str:
        """Return the persisted username, prompting or falling back when absent.

        Args:
            get_input: When True and no name is stored, prompt interactively.
            default: Name used when nothing is stored and prompting is off.

        Returns:
            The resolved username (persisted on first resolution).
        """
        stored = self.config_fh.get_file_handler("ac_user:::")
        if stored is not None:
            return stored
        # Nothing persisted yet: ask the user if allowed, else use the default,
        # then save the result for subsequent calls.
        resolved = input("Input your username: ") if get_input else default
        self.config_fh.add_to_save_file_handler("ac_user:::", resolved)
        return resolved

    def set_username(self, username):
        """Persist *username* under the account-user key in the config store."""
        result = self.config_fh.add_to_save_file_handler("ac_user:::", username)
        return result

    @staticmethod
    def exit_main(*args, **kwargs):
        """Proxy attribute: placeholder replaced at runtime by the concrete implementation."""

    @staticmethod
    def hide_console(*args, **kwargs):
        """Proxy attribute: placeholder replaced at runtime by the concrete implementation."""

    @staticmethod
    def show_console(*args, **kwargs):
        """Proxy attribute: placeholder replaced at runtime by the concrete implementation."""

    @staticmethod
    def disconnect(*args, **kwargs):
        """Proxy attribute: placeholder replaced at runtime by the concrete implementation."""

    def set_logger(self, debug=False):
        """Configure logging based on the instance prefix and debug flag.

        Args:
            debug: Explicit debug request from the CLI arguments; overrides
                the prefix-derived modes except "debug" prefixes.

        Returns:
            Tuple of (human-readable mode label, logger, log filename).
        """
        if "test" in self.prefix and not debug:
            mode_label = "in Test Mode"
            logger, logging_filename = setup_logging(
                logging.NOTSET, name="toolbox-test", interminal=True,
                file_level=logging.NOTSET, app_name=self.id)
        elif "live" in self.prefix and not debug:
            mode_label = "in Live Mode"
            logger, logging_filename = setup_logging(
                logging.DEBUG, name="toolbox-live", interminal=False,
                file_level=logging.WARNING, app_name=self.id)
            # setup_logging(logging.WARNING, name="toolbox-live", is_online=True
            #              , online_level=logging.WARNING).info("Logger initialized")
        elif "debug" in self.prefix or self.prefix.endswith("D"):
            # A debug-style prefix both strips its marker and flips the
            # instance into debug mode.
            self.prefix = self.prefix.replace("-debug", '').replace("debug", '')
            mode_label = "in debug Mode"
            logger, logging_filename = setup_logging(
                logging.DEBUG, name="toolbox-debug", interminal=True,
                file_level=logging.WARNING, app_name=self.id)
            self.debug = True
        elif debug:
            mode_label = "in args debug Mode"
            logger, logging_filename = setup_logging(
                logging.DEBUG, name=f"toolbox-{self.prefix}-debug",
                interminal=True,
                file_level=logging.DEBUG, app_name=self.id)
        else:
            mode_label = "in Default"
            logger, logging_filename = setup_logging(
                logging.ERROR, name=f"toolbox-{self.prefix}", app_name=self.id)

        return mode_label, logger, logging_filename

    @property
    def debug(self):
        # Read-only view of the debug flag; mutate via the setter (bool only).
        return self._debug

    @debug.setter
    def debug(self, value):
        """Set the debug flag.

        Args:
            value: Must be a bool.

        Raises:
            ValueError: If *value* is not a bool.
        """
        if not isinstance(value, bool):
            # Grammar fix in the message ("an boolean" -> "a boolean").
            self.logger.debug(f"Value must be a boolean. is : {value} type of {type(value)}")
            raise ValueError("Value must be a boolean.")

        self._debug = value

    def debug_rains(self, e):
        """Report an exception; re-raise it only when debug mode is active.

        Always prints a framed traceback (must be called from an ``except``
        block so ``traceback.format_exc()`` has an active traceback). In
        non-debug mode the error is additionally logged instead of re-raised.

        Args:
            e: The caught exception.

        Raises:
            The given exception *e*, only when ``self.debug`` is True.
        """
        import traceback

        if not self.debug:
            # Non-debug path logs the error before printing the trace,
            # matching the original ordering.
            self.logger.error(f"Error: {e}")

        # Banner/traceback printing was duplicated across both branches;
        # factored into one shared section.
        banner = "=" * 5 + " DEBUG " + "=" * 5
        self.print(banner)
        self.print(traceback.format_exc())
        self.print(banner)

        if self.debug:
            raise e

    def set_flows(self, r):
        # Replace the registered flows mapping wholesale (flow name -> callable).
        self.flows = r

    async def run_flows(self, name, **kwargs):
        """Resolve a named flow (loading it on demand) and execute it.

        Args:
            name: Flow name to look up in ``self.flows``.
            **kwargs: Forwarded to the flow callable.

        Returns:
            The flow's result, or None when the flow cannot be found.
        """
        from ..flows import flows_dict as flows_dict_func

        # Lazily merge in flow definitions that are not registered yet.
        if name not in self.flows:
            self.flows = {**self.flows, **flows_dict_func(s=name, remote=True)}

        flow = self.flows.get(name)
        if flow is None:
            print("Flow not found, active flows:", len(self.flows.keys()))
            return None

        runner_app = get_app(from_="runner")
        if asyncio.iscoroutinefunction(flow):
            return await flow(runner_app, self.args_sto, **kwargs)
        return flow(runner_app, self.args_sto, **kwargs)

    def _coppy_mod(self, content, new_mod_dir, mod_name, file_type='py'):
        """Write a module's bytes into the runtime mod-lib directory.

        Creates the target package (with an ``__init__.py`` carrying the
        current version) when missing. An existing file is rewritten only
        when its content actually differs from *content*; a missing file is
        created exclusively ('xb').

        Args:
            content: Raw module bytes to persist.
            new_mod_dir: Target directory of the runtime package.
            mod_name: Module name (file stem).
            file_type: File extension, defaults to 'py'.
        """
        mode = 'xb'
        # 1 MiB = 1048576 bytes; previous code divided by 8388608 (8 MiB),
        # under-reporting the size by a factor of 8.
        self.logger.info(f" coppy mod {mod_name} to {new_mod_dir} size : {sys.getsizeof(content) / 1048576:.3f} mb")

        if not os.path.exists(new_mod_dir):
            os.makedirs(new_mod_dir)
            with open(f"{new_mod_dir}/__init__.py", "w") as nmd:
                nmd.write(f"__version__ = '{self.version}'")

        target = f"{new_mod_dir}/{mod_name}.{file_type}"
        if os.path.exists(target):
            mode = False

            with open(target, 'rb') as d:
                runtime_mod = d.read()

            # BUG FIX: compare content, not just length — a change that keeps
            # the file size identical must still trigger a rewrite.
            if content != runtime_mod:
                mode = 'wb'

        if mode:
            with open(target, mode) as f:
                f.write(content)

    def _pre_lib_mod(self, mod_name, path_to="./runtime", file_type='py'):
        """Stage a mod's source into the runtime mod-lib and return its import prefix.

        Args:
            mod_name: Module name (file stem) to stage.
            path_to: Root directory of the runtime tree.
            file_type: File extension of the mod source.

        Returns:
            The dotted import prefix of the staged runtime mod-lib package.
        """
        working_dir = self.id.replace(".", "_")
        lib_mod_dir = f"toolboxv2.runtime.{working_dir}.mod_lib."

        self.logger.info(f"pre_lib_mod {mod_name} from {lib_mod_dir}")

        # Dev mode reads from the parallel *_dev mods tree.
        suffix = "_dev" if self.dev_modi else ""
        source_path = f"./mods{suffix}/{mod_name}.{file_type}"
        target_dir = f"{path_to}/{working_dir}/mod_lib"

        with open(source_path, "rb") as src:
            payload = src.read()
        self._coppy_mod(payload, target_dir, mod_name, file_type=file_type)
        return lib_mod_dir

    def _copy_load(self, mod_name, file_type='py', **kwargs):
        """Stage a module into the runtime lib, then load an instance of it.

        Args:
            mod_name: Name of the module to stage and load.
            file_type: File extension forwarded to the staging step.
            **kwargs: Passed through to ``inplace_load_instance``.

        Returns:
            Whatever ``inplace_load_instance`` returns for the staged module.
        """
        # BUG FIX: file_type was previously passed positionally and landed in
        # _pre_lib_mod's `path_to` parameter; bind it by keyword instead.
        loc = self._pre_lib_mod(mod_name, file_type=file_type)
        return self.inplace_load_instance(mod_name, loc=loc, **kwargs)

    def helper_install_pip_module(self, module_name):
        """Install a pip package into the current interpreter environment.

        Skipped entirely for 'main' instances so production environments are
        never mutated automatically.

        Args:
            module_name: Distribution name to install. This may originate
                from parsed import-error text (untrusted), so no shell is used.
        """
        if 'main' in self.id:
            return
        import subprocess  # local import: only needed on this rare path
        self.print(f"Installing {module_name} GREEDY")
        # SECURITY FIX: the previous os.system() call interpolated
        # module_name into a shell string (injection risk); run pip with an
        # argument list and no shell instead.
        subprocess.run([sys.executable, "-m", "pip", "install", module_name], check=False)

    def python_module_import_classifier(self, mod_name, error_message):
        """Triage a ModuleNotFoundError message and attempt an automatic remedy.

        Args:
            mod_name: Name of the mod whose import failed.
            error_message: The stringified exception text.

        Returns:
            A Result for unrecoverable internal misses, the result of a
            CloudM install for missing mods, the pip-install helper's result
            for third-party packages, or None when nothing matches.
        """
        if error_message.startswith("No module named 'toolboxv2.utils"):
            # A missing internal utils module cannot be installed.
            return Result.default_internal_error(f"404 {error_message.split('utils')[1]} not found")

        if error_message.startswith("No module named 'toolboxv2.mods"):
            if mod_name.startswith('.'):
                return None
            # Missing toolbox mod: delegate installation to CloudM.
            return self.run_a_from_sync(self.a_run_any, ("CloudM", "install"), module_name=mod_name)

        if error_message.startswith("No module named '"):
            # Third-party package: extract the quoted name and pip-install it.
            pip_requ = error_message.split("'")[1].replace("'", "").strip()
            # if 'y' in input(f"\t\t\tAuto install {pip_requ} Y/n").lower:
            return self.helper_install_pip_module(pip_requ)
            # return Result.default_internal_error(f"404 {pip_requ} not found")

    def inplace_load_instance(self, mod_name, loc="toolboxv2.mods.", spec='app', save=True, mfo=None):
        """Import a mod module and register a live instance of it.

        Args:
            mod_name: Module name under the mods package.
            loc: Import-location prefix; switched to the dev tree in dev mode.
            spec: Specification key the instance is stored under.
            save: When False, return the instance without registering it.
            mfo: Pre-imported module object to reuse instead of importing.

        Returns:
            The registered Tools instance or module object, or None when the
            module is missing or fails to import.
        """
        if self.dev_modi and loc == "toolboxv2.mods.":
            loc = "toolboxv2.mods_dev."
        # Reload semantics: drop the already-online instance first, keeping
        # its persisted data (delete=False).
        if spec=='app' and self.mod_online(mod_name):
            self.logger.info(f"Reloading mod from : {loc + mod_name}")
            self.remove_mod(mod_name, spec=spec, delete=False)

        # Accept either a package directory or a single .py file under mods/.
        if (os.path.exists(self.start_dir + '/mods/' + mod_name) or os.path.exists(
            self.start_dir + '/mods/' + mod_name + '.py')) and (
            os.path.isdir(self.start_dir + '/mods/' + mod_name) or os.path.isfile(
            self.start_dir + '/mods/' + mod_name + '.py')):
            try:
                if mfo is None:
                    modular_file_object = import_module(loc + mod_name)
                else:
                    modular_file_object = mfo
                self.modules[mod_name] = modular_file_object
            except ModuleNotFoundError as e:
                self.logger.error(Style.RED(f"module {loc + mod_name} not found is type sensitive {e}"))
                self.print(Style.RED(f"module {loc + mod_name} not found is type sensitive {e}"))
                if self.debug or self.args_sto.sysPrint:
                    # Try to auto-remedy the missing dependency.
                    self.python_module_import_classifier(mod_name, str(e))
                self.debug_rains(e)
                return None
        else:
            self.print(f"module {loc + mod_name} is not valid")
            return None
        # A "Tools" class marks a class-based mod; a bare "name" attribute
        # marks a module-level mod (re-imported to get a fresh object).
        if hasattr(modular_file_object, "Tools"):
            tools_class = modular_file_object.Tools
        else:
            if hasattr(modular_file_object, "name"):
                tools_class = modular_file_object
                modular_file_object = import_module(loc + mod_name)
            else:
                tools_class = None

        modular_id = None
        instance = modular_file_object
        app_instance_type = "file/application"

        if tools_class is None:
            modular_id = modular_file_object.Name if hasattr(modular_file_object, "Name") else mod_name

        if tools_class is None and modular_id is None:
            # Unknown module shape: hand the raw module back unregistered.
            modular_id = str(modular_file_object.__name__)
            self.logger.warning(f"Unknown instance loaded {mod_name}")
            return modular_file_object

        if tools_class is not None:
            # Instantiate the Tools class bound to this app under `spec`.
            tools_class = self.save_initialized_module(tools_class, spec)
            modular_id = tools_class.name
            app_instance_type = "functions/class"
        else:
            instance.spec = spec
        # if private:
        #     self.functions[modular_id][f"{spec}_private"] = private

        if not save:
            return instance if tools_class is None else tools_class

        return self.save_instance(instance, modular_id, spec, app_instance_type, tools_class=tools_class)

    def save_instance(self, instance, modular_id, spec='app', instance_type="file/application", tools_class=None):
        """Register a loaded mod instance (module or Tools class) under a spec.

        Args:
            instance: The module object (module-style mods).
            modular_id: Registration key in ``self.functions``.
            spec: Specification key; one instance is kept per spec.
            instance_type: Descriptor of the instance kind.
            tools_class: Initialized Tools instance for class-based mods.

        Returns:
            The registered instance (Tools instance when available), after
            running any registered on_start hooks.

        Raises:
            ImportError: When the mod is already registered under this spec
                or the registration shape is invalid.
        """
        # Module-style mod already known: keep the first instance per spec.
        if modular_id in self.functions and tools_class is None:
            if self.functions[modular_id].get(f"{spec}_instance", None) is None:
                self.functions[modular_id][f"{spec}_instance"] = instance
                self.functions[modular_id][f"{spec}_instance_type"] = instance_type
            else:
                self.print("Firest instance stays use new spec to get new instance")
                if modular_id in self.functions and self.functions[modular_id].get(f"{spec}_instance", None) is not None:
                    return self.functions[modular_id][f"{spec}_instance"]
                else:
                    raise ImportError(f"Module already known {modular_id} and not avalabel reload using other spec then {spec}")

        elif tools_class is not None:
            # Class-based mod: register the Tools instance and bridge its
            # legacy `tools` dict into app functions (backwards compat).
            if modular_id not in self.functions:
                self.functions[modular_id] = {}
            self.functions[modular_id][f"{spec}_instance"] = tools_class
            self.functions[modular_id][f"{spec}_instance_type"] = instance_type

            try:
                if not hasattr(tools_class, 'tools'):
                    tools_class.tools = {"Version": tools_class.get_version, 'name': tools_class.name}
                for function_name in list(tools_class.tools.keys()):
                    t_function_name = function_name.lower()
                    if t_function_name != "all" and t_function_name != "name":
                        # Register each legacy tool through the standard tb decorator.
                        self.tb(function_name, mod_name=modular_id)(tools_class.tools.get(function_name))
                # "/BC" marks a backwards-compatibility (legacy tools) registration.
                self.functions[modular_id][f"{spec}_instance_type"] += "/BC"
                if hasattr(tools_class, 'on_exit'):
                    if "on_exit" in self.functions[modular_id]:
                        self.functions[modular_id]["on_exit"].append(tools_class.on_exit)
                    else:
                        self.functions[modular_id]["on_exit"] = [tools_class.on_exit]
            except Exception as e:
                # Best-effort compat bridging: a failure here must not block
                # registration of the mod itself.
                self.logger.error(f"Starting Module {modular_id} compatibility failed with : {e}")
                pass
        elif modular_id not in self.functions and tools_class is None:
            # First registration of a module-style mod.
            self.functions[modular_id] = {}
            self.functions[modular_id][f"{spec}_instance"] = instance
            self.functions[modular_id][f"{spec}_instance_type"] = instance_type

        else:
            raise ImportError(f"Modular {modular_id} is not a valid mod")
        # Fire any on_start hooks registered for this mod; async hooks are
        # dispatched as background tasks.
        on_start = self.functions[modular_id].get("on_start")
        if on_start is not None:
            i = 1
            for f in on_start:
                try:
                    f_, e = self.get_function((modular_id, f), state=True, specification=spec)
                    if e == 0:
                        self.logger.info(Style.GREY(f"Running On start {f} {i}/{len(on_start)}"))
                        if asyncio.iscoroutinefunction(f_):
                            self.print(f"Async on start is only in Tool claas supported for {modular_id}.{f}" if tools_class is None else f"initialization starting soon for {modular_id}.{f}")
                            self.run_bg_task_advanced(f_)
                        else:
                            o = f_()
                            if o is not None:
                                self.print(f"Function {modular_id} On start result: {o}")
                    else:
                        self.logger.warning(f"starting function not found {e}")
                except Exception as e:
                    self.logger.debug(Style.YELLOW(
                        Style.Bold(f"modular:{modular_id}.{f} on_start error {i}/{len(on_start)} -> {e}")))
                    self.debug_rains(e)
                finally:
                    i += 1
        return instance if tools_class is None else tools_class

    def save_initialized_module(self, tools_class, spec):
        """Instantiate a Tools class bound to this app under the given spec."""
        tools_class.spec = spec
        return tools_class(app=self)

    def mod_online(self, mod_name, installed=False):
        """Report whether a mod is registered; optionally load it first.

        Args:
            mod_name: Mod registration key.
            installed: When True, attempt a save_load before checking.
        """
        if installed and mod_name not in self.functions:
            # Try to bring the mod online before answering.
            self.save_load(mod_name)
        is_online = mod_name in self.functions
        return is_online

    def _get_function(self,
                      name: Enum or None,
                      state: bool = True,
                      specification: str = "app",
                      metadata=False, as_str: tuple or None = None, r=0, **kwargs):
        """Resolve a registered function, optionally bound to a live instance.

        Args:
            name: Enum (with .NAME) or [module, function] pair identifying the
                target; ignored when `as_str` is given.
            state: Request a stateful (instance-bound) callable; overridden by
                the function's own registered "state" flag when present.
            specification: Spec key selecting which instance to bind against.
            metadata: When True, also return the function's metadata dict.
            as_str: Explicit (modular_id, function_id) pair, bypassing `name`.
            r: Internal recursion guard — one lazy-load retry is allowed.
            **kwargs: 'i' selects an overload index when the registration is a
                list (defaults to -1, the latest).

        Returns:
            Tuple of (callable or (metadata, callable), error code) where
            error 0 is success; "404"/"301"/400-style codes signal misses.
        """
        # Normalize the three accepted name shapes into (modular_id, function_id).
        if as_str is None and isinstance(name, Enum):
            modular_id = str(name.NAME.value)
            function_id = str(name.value)
        elif as_str is None and isinstance(name, list):
            modular_id, function_id = name[0], name[1]
        else:
            modular_id, function_id = as_str

        self.logger.info(f"getting function : {specification}.{modular_id}.{function_id}")

        if modular_id not in self.functions:
            # One lazy-load retry: attempt to load the mod, then re-resolve.
            if r == 0:
                self.save_load(modular_id, spec=specification)
                return self.get_function(name=(modular_id, function_id),
                                         state=state,
                                         specification=specification,
                                         metadata=metadata,
                                         r=1)
            self.logger.warning(f"function modular not found {modular_id} 404")
            return "404", 404

        if function_id not in self.functions[modular_id]:
            self.logger.warning(f"function data not found {modular_id}.{function_id} 404")
            return "404", 404

        function_data = self.functions[modular_id][function_id]

        # A list registration holds overloads; pick one by index (kwarg 'i').
        if isinstance(function_data, list):
            print(f"functions {function_id} : {function_data}")
            function_data = self.functions[modular_id][function_data[kwargs.get('i', -1)]]
            print(f"functions {modular_id} : {function_data}")
        function = function_data.get("func")
        params = function_data.get("params")

        # The registration's own state flag wins over the caller's request.
        state_ = function_data.get("state")
        if state_ is not None and state != state_:
            state = state_

        if function is None:
            self.logger.warning("No function found")
            return "404", 404

        if params is None:
            self.logger.warning("No function (params) found")
            return "404", 301

        if metadata and not state:
            self.logger.info("returning metadata stateless")
            return (function_data, function), 0

        if not state:  # mens a stateless function
            self.logger.info("returning stateless function")
            return function, 0

        instance = self.functions[modular_id].get(f"{specification}_instance")

        # instance_type = self.functions[modular_id].get(f"{specification}_instance_type", "functions/class")

        # Functions whose first parameter is 'app' bind to the app itself
        # instead of a mod instance.
        if params[0] == 'app':
            instance = get_app(from_=f"fuction {specification}.{modular_id}.{function_id}")

        # No live instance yet: try to load one while the app is alive.
        if instance is None and self.alive:
            self.inplace_load_instance(modular_id, spec=specification)
            instance = self.functions[modular_id].get(f"{specification}_instance")

        if instance is None:
            self.logger.warning("No live Instance found")
            return "404", 400

        # if instance_type.endswith("/BC"):  # for backwards compatibility  functions/class/BC old modules
        #     # returning as stateless
        #     # return "422", -1
        #     self.logger.info(
        #         f"returning stateless function, cant find tools class for state handling found {instance_type}")
        #     if metadata:
        #         self.logger.info(f"returning metadata stateless")
        #         return (function_data, function), 0
        #     return function, 0

        self.logger.info("wrapping in higher_order_function")

        self.logger.info(f"returned fuction {specification}.{modular_id}.{function_id}")
        # Bind the instance as the first argument (stateful call shape).
        higher_order_function = partial(function, instance)

        if metadata:
            self.logger.info("returning metadata stateful")
            return (function_data, higher_order_function), 0

        self.logger.info("returning stateful function")
        return higher_order_function, 0

    def save_exit(self):
        """Persist the current debug flag to the config file handler before shutdown."""
        debug_key = self.keys["debug"]
        self.logger.info(
            f"save exiting saving data to {self.config_fh.file_handler_filename} states of {self.debug=}")
        self.config_fh.add_to_save_file_handler(debug_key, str(self.debug))

    def init_mod(self, mod_name, spec='app'):
        """
        Initializes a module in a thread-safe manner by submitting the
        asynchronous initialization to the running event loop.
        """
        # Only the package part before the first dot identifies the module.
        base_name, _, _ = mod_name.partition('.')
        self.run_bg_task(self.a_init_mod, base_name, spec)

    def run_bg_task(self, task: Callable, *args, **kwargs) -> asyncio.Task | None:
        """
        Runs a coroutine in the background without blocking the caller.

        This is the primary method for "fire-and-forget" async tasks. It schedules
        the coroutine to run on the application's main event loop.

        Args:
            task: The coroutine function to run.
            *args: Arguments to pass to the coroutine function.
            **kwargs: Keyword arguments to pass to the coroutine function.

        Returns:
            An asyncio.Task object representing the scheduled task, or None if
            the task could not be scheduled.
        """
        if not callable(task):
            self.logger.warning("Task passed to run_bg_task is not callable!")
            return None

        if not asyncio.iscoroutinefunction(task) and not asyncio.iscoroutine(task):
            self.logger.warning(f"Task '{getattr(task, '__name__', 'unknown')}' is not a coroutine. "
                                f"Use run_bg_task_advanced for synchronous functions.")
            # Fallback to advanced runner for convenience
            self.run_bg_task_advanced(task, *args, **kwargs)
            return None

        try:
            loop = self.loop_gard()
            if not loop.is_running():
                # If the main loop isn't running, we can't create a task on it.
                # This scenario is handled by run_bg_task_advanced.
                self.logger.info("Main event loop not running. Delegating to advanced background runner.")
                return self.run_bg_task_advanced(task, *args, **kwargs)

            # Create the coroutine if it's a function
            coro = task(*args, **kwargs) if asyncio.iscoroutinefunction(task) else task

            # Create a task on the running event loop
            bg_task = loop.create_task(coro)

            # Add a callback to log exceptions from the background task
            def _log_exception(the_task: asyncio.Task):
                if not the_task.cancelled() and the_task.exception():
                    self.logger.error(f"Exception in background task '{the_task.get_name()}':",
                                      exc_info=the_task.exception())

            bg_task.add_done_callback(_log_exception)
            self.bg_tasks.append(bg_task)
            return bg_task

        except Exception as e:
            self.logger.error(f"Failed to schedule background task: {e}", exc_info=True)
            return None

    def run_bg_task_advanced(self, task: Callable, *args, **kwargs) -> threading.Thread:
        """
        Execute a task on a dedicated daemon thread with its own event loop.

        Suitable for launching async work from synchronous code, or for
        long-running jobs that must not touch the main event loop.

        Args:
            task: Sync function, coroutine function, or coroutine object.
            *args: Positional arguments for the task.
            **kwargs: Keyword arguments for the task.

        Returns:
            The daemon threading.Thread driving the task, or None when the
            input is not callable.
        """
        if not callable(task):
            self.logger.warning("Task for run_bg_task_advanced is not callable!")
            return None

        def _runner():
            # A private event loop keeps this work isolated from the main one.
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            try:
                if asyncio.iscoroutinefunction(task):
                    coro = task(*args, **kwargs)
                elif asyncio.iscoroutine(task):
                    # Already a coroutine object.
                    coro = task
                else:
                    # Synchronous callable: off-load to the default executor so
                    # the fresh loop itself never blocks.
                    coro = loop.run_in_executor(None, lambda: task(*args, **kwargs))

                outcome = loop.run_until_complete(coro)
                self.logger.debug(f"Advanced background task '{getattr(task, '__name__', 'unknown')}' completed.")
                if outcome is not None:
                    self.logger.debug(f"Task result: {str(outcome)[:100]}")
            except Exception as e:
                self.logger.error(f"Error in advanced background task '{getattr(task, '__name__', 'unknown')}':",
                                  exc_info=e)
            finally:
                # Cancel leftovers, drain them, then dispose of the loop.
                try:
                    pending = asyncio.all_tasks(loop=loop)
                    if pending:
                        for p in pending:
                            p.cancel()
                        loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
                finally:
                    loop.close()
                    asyncio.set_event_loop(None)

        # Daemon thread: it will not keep the process alive on exit.
        worker = threading.Thread(target=_runner, daemon=True,
                                  name=f"BGTask-{getattr(task, '__name__', 'unknown')}")
        self.bg_tasks.append(worker)
        worker.start()
        return worker

    # Helper method to wait for background tasks to complete (optional)
    def wait_for_bg_tasks(self, timeout=None):
        """
        Wait for all background tasks to complete.

        ``bg_tasks`` may hold both ``threading.Thread`` objects (from
        ``run_bg_task_advanced``) and ``asyncio.Task`` objects (from
        ``run_bg_task``); the previous implementation called ``is_alive()``
        on every entry and raised AttributeError on asyncio tasks.

        Args:
            timeout: Maximum time to wait (in seconds) per thread.
                     None means wait indefinitely. Note the timeout applies
                     to each thread individually, not to the whole batch.

        Returns:
            bool: True if all tasks completed, False if a timeout occurred
            or an asyncio task is still pending.
        """
        for task in list(self.bg_tasks):
            if isinstance(task, threading.Thread):
                if not task.is_alive():
                    continue
                task.join(timeout=timeout)
                if task.is_alive():
                    return False
            elif isinstance(task, asyncio.Task):
                # asyncio tasks cannot be joined synchronously from here;
                # report pending ones instead of crashing on is_alive().
                if not task.done():
                    return False
        return True

    def __call__(self, *args, **kwargs):
        """Make the app instance directly callable; forwards everything to :meth:`run`."""
        return self.run(*args, **kwargs)

    def run(self, *args, request=None, running_function_coro=None, **kwargs):
        """
        Run a function with support for SSE streaming in both
        threaded and non-threaded contexts.

        Args:
            *args: When ``running_function_coro`` is None, ``args[0]`` must be a
                ``(module_name, function_name)`` tuple identifying the target.
            request: Optional raw request dict; converted to ``RequestData`` and
                injected as the ``request`` kwarg when the target function was
                registered with ``request_as_kwarg``.
            running_function_coro: Optional already-created coroutine to execute
                instead of resolving one via ``a_run_any``.
            **kwargs: Forwarded to the target function.

        Returns:
            A streaming ``Result`` unchanged, a JSON-serializable dict for
            non-stream ``Result`` values, or the raw result otherwise.
        """
        if running_function_coro is None:
            mn, fn = args[0]
            # Inject the parsed request only for functions that asked for it.
            if self.functions.get(mn, {}).get(fn, {}).get('request_as_kwarg', False):
                kwargs["request"] = RequestData.from_dict(request)
                # 'data'/'form_data' are folded into the request body unless the
                # target explicitly declares them as parameters.
                if 'data' in kwargs and 'data' not in self.functions.get(mn, {}).get(fn, {}).get('params', []):
                    kwargs["request"].data = kwargs["request"].body = kwargs['data']
                    del kwargs['data']
                if 'form_data' in kwargs and 'form_data' not in self.functions.get(mn, {}).get(fn, {}).get('params',
                                                                                                           []):
                    kwargs["request"].form_data = kwargs["request"].body = kwargs['form_data']
                    del kwargs['form_data']

        # Create the coroutine
        coro = running_function_coro or self.a_run_any(*args, **kwargs)

        # Get or create an event loop
        try:
            loop = asyncio.get_event_loop()
            is_running = loop.is_running()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            is_running = False

        # If the loop is already running, run in a separate thread
        if is_running:
            # Create thread pool executor as needed (shared across instances)
            if not hasattr(self.__class__, '_executor'):
                self.__class__._executor = ThreadPoolExecutor(max_workers=4)

            def run_in_new_thread():
                # Set up a new loop in this thread
                new_loop = asyncio.new_event_loop()
                asyncio.set_event_loop(new_loop)

                try:
                    # Run the coroutine
                    return new_loop.run_until_complete(coro)
                finally:
                    new_loop.close()

            # Run in thread and get result (blocks the caller until done)
            thread_result = self.__class__._executor.submit(run_in_new_thread).result()

            # Handle streaming results from thread
            if isinstance(thread_result, dict) and thread_result.get("is_stream"):
                # Create a new SSE stream in the main thread
                async def stream_from_function():
                    # Re-run the function with direct async access.
                    # NOTE(review): this executes the target a second time for
                    # streaming responses — confirm the target is idempotent.
                    stream_result = await self.a_run_any(*args, **kwargs)

                    if (isinstance(stream_result, Result) and
                        getattr(stream_result.result, 'data_type', None) == "stream"):
                        # Get and forward data from the original generator
                        original_gen = stream_result.result.data.get("generator")
                        if inspect.isasyncgen(original_gen):
                            async for item in original_gen:
                                yield item

                # Return a new streaming Result
                return Result.stream(
                    stream_generator=stream_from_function(),
                    headers=thread_result.get("headers", {})
                )

            result = thread_result
        else:
            # Direct execution when loop is not running
            result = loop.run_until_complete(coro)

        # Process the final result
        if isinstance(result, Result):
            if 'debug' in self.id:
                result.print()
            # Stream results must pass through untouched so SSE keeps working.
            if getattr(result.result, 'data_type', None) == "stream":
                return result
            return result.to_api_result().model_dump(mode='json')

        return result

    def loop_gard(self):
        """Return a usable event loop, rebinding ``self.loop`` when it is missing or closed."""
        loop = self.loop
        if loop is None:
            # First use: bring the app's event-loop machinery up.
            self._start_event_loop()
            loop = asyncio.get_event_loop()
        if loop.is_closed():
            # A closed loop cannot schedule work; grab the current one instead.
            loop = asyncio.get_event_loop()
        self.loop = loop
        return loop

    async def a_init_mod(self, mod_name, spec='app'):
        """Load a module and await its asynchronous initialisation when still pending."""
        mod = self.save_load(mod_name, spec=spec)
        # Modules exposing a deferred init object are awaitable until initialised.
        if hasattr(mod, "__initobj") and not mod.async_initialized:
            await mod
        return mod


    def load_mod(self, mod_name: str, mlm='I', **kwargs):
        """
        Load a module by name using the configured load mechanism.

        Args:
            mod_name: Name of the module to load.
            mlm: Load mechanism selector; 'I' (inplace) or 'C' (copy).
            **kwargs: Forwarded to the selected loader.

        Returns:
            The loader's result, or a default internal-error Result when
            loading fails.
        """
        action_list_helper = ['I (inplace load dill on error python)',
                              ]
        dispatch = {
            "I": lambda: self.inplace_load_instance(mod_name, **kwargs),
            "C": lambda: self._copy_load(mod_name, **kwargs),
        }

        try:
            loader = dispatch.get(mlm)
            if loader is None:
                self.logger.critical(
                    f"config mlm must be {' or '.join(action_list_helper)} is {mlm=}")
                raise ValueError(f"config mlm must be {' or '.join(action_list_helper)} is {mlm=}")
            return loader()
        except ValueError as e:
            self.logger.warning(Style.YELLOW(f"Error Loading Module '{mod_name}', with error :{e}"))
            self.debug_rains(e)
        except ImportError as e:
            self.logger.error(Style.YELLOW(f"Error Loading Module '{mod_name}', with error :{e}"))
            self.debug_rains(e)
        except Exception as e:
            self.logger.critical(Style.RED(f"Error Loading Module '{mod_name}', with critical error :{e}"))
            print(Style.RED(f"Error Loading Module '{mod_name}'"))
            self.debug_rains(e)

        return Result.default_internal_error(info="info's in logs.")

    async def load_external_mods(self):
        """Load every module folder listed in the EXTERNAL_PATH_RUNNABLE env var (comma separated)."""
        configured = os.getenv("EXTERNAL_PATH_RUNNABLE", '')
        for mod_path in configured.split(','):
            if not mod_path:
                continue
            await self.load_all_mods_in_file(mod_path)

    async def load_all_mods_in_file(self, working_dir="mods"):
        """
        Concurrently load every module found in ``working_dir``.

        Modules already present in ``self.functions`` are skipped. Each
        remaining module is loaded via ``save_load`` in a worker thread;
        modules exposing deferred async initialisation are finished in a
        follow-up task.

        Args:
            working_dir: Folder to scan for module files (default "mods").

        Returns:
            A human-readable summary of how many modules were opened.
        """
        print(f"LOADING ALL MODS FROM FOLDER : {working_dir}")
        t0 = time.perf_counter()
        # Get the list of all modules
        module_list = self.get_all_mods(working_dir)
        open_modules = self.functions.keys()
        start_len = len(open_modules)

        # Skip modules that are already open.
        for om in open_modules:
            if om in module_list:
                module_list.remove(om)

        tasks: set[asyncio.Task] = {
            asyncio.create_task(asyncio.to_thread(self.save_load, mod, 'app'))
            for mod in module_list
        }

        async def _finish_async_init(result):
            # BUGFIX: `result` is now bound per call. The previous closure
            # captured the loop variable, so late-running tasks could await
            # and report the wrong (last-assigned) module.
            try:
                if asyncio.iscoroutine(result):
                    await result
                if hasattr(result, 'Name'):
                    print('Opened :', result.Name)
                elif hasattr(result, 'name'):
                    print('Opened :', result.name)
            except Exception as e:
                self.debug_rains(e)
                if hasattr(result, 'Name'):
                    print('Error opening :', result.Name)
                elif hasattr(result, 'name'):
                    print('Error opening :', result.name)

        for pending in asyncio.as_completed(tasks):
            try:
                result = await pending
                if hasattr(result, 'Name'):
                    print('Opened :', result.Name)
                elif hasattr(result, 'name'):
                    if hasattr(result, 'async_initialized'):
                        if not result.async_initialized:
                            # Finish async init without blocking the load loop.
                            asyncio.create_task(_finish_async_init(result))
                        else:
                            print('Opened :', result.name)
                else:
                    print('Opened :', result)
            except Exception as e:
                self.logger.error(Style.RED(f"An Error occurred while opening all modules error: {str(e)}"))
                self.debug_rains(e)

        opened = len(self.functions.keys()) - start_len

        self.logger.info(f"Opened {opened} modules in {time.perf_counter() - t0:.2f}s")
        return f"Opened {opened} modules in {time.perf_counter() - t0:.2f}s"

    def get_all_mods(self, working_dir="mods", path_to="./runtime", use_wd=True):
        """
        List loadable module names found in a directory.

        Filters out dunder/hidden files, test files and "mainTool" entries,
        then strips the ``.py`` suffix from the survivors.
        """
        self.logger.info(f"collating all mods in working directory {working_dir}")

        suffix = "_dev" if self.dev_modi else ""
        if working_dir == "mods" and use_wd:
            working_dir = f"{self.start_dir}/mods{suffix}"
        elif not use_wd:
            app_dir = self.id.replace(".", "_")
            working_dir = f"{path_to}/{app_dir}/mod_lib{suffix}/"
        entries = os.listdir(working_dir)

        self.logger.info(f"found : {len(entries)} files")

        def _is_module(candidate):
            # Exclude legacy mainTool files, dunder/hidden files and tests.
            return ("mainTool" not in candidate
                    and not candidate.startswith("__")
                    and not candidate.startswith(".")
                    and not candidate.startswith("test_"))

        def _strip_py(candidate: str):
            return candidate[:-3] if candidate.endswith(".py") else candidate

        mods_list = [_strip_py(entry) for entry in entries if _is_module(entry)]

        self.logger.info(f"found : {len(mods_list)} Modules")
        return mods_list

    def remove_all_modules(self, delete=False):
        """Close every loaded module, optionally deleting its registration."""
        for mod_name in list(self.functions.keys()):
            self.logger.info(f"closing: {mod_name}")
            self.remove_mod(mod_name, delete=delete)

    def remove_mod(self, mod_name, spec='app', delete=True):
        """
        Shut a module down, run its on_exit hooks and optionally drop it.

        Coroutine hooks are queued on ``self.exit_tasks`` for the async
        shutdown path; synchronous hooks run immediately.
        """
        if mod_name not in self.functions:
            self.logger.info(f"mod not active {mod_name}")
            return

        mod_entry = self.functions[mod_name]
        on_exit = mod_entry.get("on_exit")
        self.logger.info(f"closing: {on_exit}")

        def _drop_instance():
            # Remove the spec-bound instance and its type tag, if present.
            mod_entry.pop(f"{spec}_instance", None)
            mod_entry.pop(f"{spec}_instance_type", None)

        # Legacy (/BC) modules keep their exit hook on the instance itself.
        if on_exit is None and mod_entry.get(f"{spec}_instance_type", "").endswith("/BC"):
            instance = mod_entry.get(f"{spec}_instance", None)
            if instance is not None and hasattr(instance, 'on_exit'):
                if asyncio.iscoroutinefunction(instance.on_exit):
                    self.exit_tasks.append(instance.on_exit)
                else:
                    instance.on_exit()

        if on_exit is None:
            if delete:
                self.functions[mod_name] = {}
                del self.functions[mod_name]
            else:
                _drop_instance()
            return

        for idx, hook_name in enumerate(on_exit):
            try:
                hook, err = self.get_function((mod_name, hook_name), state=True, specification=spec, i=idx)
                if err == 0:
                    self.logger.info(Style.GREY(f"Running On exit {hook_name} {idx + 1}/{len(on_exit)}"))
                    if asyncio.iscoroutinefunction(hook):
                        # Defer coroutine hooks to the async exit path.
                        self.exit_tasks.append(hook)
                        outcome = None
                    else:
                        outcome = hook()
                    if outcome is not None:
                        self.print(f"Function On Exit result: {outcome}")
                else:
                    self.logger.warning("closing function not found")
            except Exception as e:
                self.logger.debug(
                    Style.YELLOW(Style.Bold(
                        f"modular:{mod_name}.{hook_name} on_exit error {idx + 1}/{len(on_exit)} -> {e}")))

                self.debug_rains(e)

        _drop_instance()

        if delete:
            self.functions[mod_name] = {}
            del self.functions[mod_name]

    async def a_remove_all_modules(self, delete=False):
        """Async variant: close every loaded module, optionally deleting registrations."""
        for mod_name in list(self.functions.keys()):
            self.logger.info(f"closing: {mod_name}")
            await self.a_remove_mod(mod_name, delete=delete)

    async def a_remove_mod(self, mod_name, spec='app', delete=True):
        """
        Async module shutdown: run on_exit hooks (awaiting coroutine hooks)
        and optionally delete the module registration.
        """
        if mod_name not in self.functions:
            self.logger.info(f"mod not active {mod_name}")
            return

        mod_entry = self.functions[mod_name]
        on_exit = mod_entry.get("on_exit")
        self.logger.info(f"closing: {on_exit}")

        def _drop_instance():
            # Remove the spec-bound instance and its type tag, if present.
            mod_entry.pop(f"{spec}_instance", None)
            mod_entry.pop(f"{spec}_instance_type", None)

        # Legacy (/BC) modules keep their exit hook on the instance itself.
        if on_exit is None and mod_entry.get(f"{spec}_instance_type", "").endswith("/BC"):
            instance = mod_entry.get(f"{spec}_instance", None)
            if instance is not None and hasattr(instance, 'on_exit'):
                if asyncio.iscoroutinefunction(instance.on_exit):
                    await instance.on_exit()
                else:
                    instance.on_exit()

        if on_exit is None:
            if delete:
                self.functions[mod_name] = {}
                del self.functions[mod_name]
            else:
                _drop_instance()
            return

        for idx, hook_ref in enumerate(on_exit):
            f = hook_ref
            try:
                err = 1
                # Hooks may be registered by name (resolved here) or directly
                # as callables.
                if isinstance(f, str):
                    hook, err = self.get_function((mod_name, f), state=True, specification=spec)
                elif isinstance(f, Callable):
                    hook, err, f = f, 0, f.__name__
                if err == 0:
                    self.logger.info(Style.GREY(f"Running On exit {f} {idx + 1}/{len(on_exit)}"))
                    if asyncio.iscoroutinefunction(hook):
                        outcome = await hook()
                    else:
                        outcome = hook()
                    if outcome is not None:
                        self.print(f"Function On Exit result: {outcome}")
                else:
                    self.logger.warning("closing function not found")
            except Exception as e:
                self.logger.debug(
                    Style.YELLOW(Style.Bold(
                        f"modular:{mod_name}.{f} on_exit error {idx + 1}/{len(on_exit)} -> {e}")))
                self.debug_rains(e)

        _drop_instance()

        if delete:
            self.functions[mod_name] = {}
            del self.functions[mod_name]

    def exit(self, remove_all=True):
        """
        Synchronous shutdown: close modules, persist state and stop threads.

        Args:
            remove_all: When True, unload every module before exiting
                (``a_exit`` passes False because it already removed them).
        """
        # Idempotent: a second call is a no-op once alive is cleared.
        if not self.alive:
            return
        if self.args_sto.debug:
            self.hide_console()
        self.disconnect()
        if remove_all:
            self.remove_all_modules()
        self.logger.info("Exiting ToolBox interface")
        self.alive = False
        self.called_exit = True, time.time()
        self.save_exit()
        if hasattr(self, 'root_blob_storage') and self.root_blob_storage:
            self.root_blob_storage.exit()
        try:
            self.config_fh.save_file_handler()
        except SystemExit:
            print("If u ar testing this is fine else ...")

        if hasattr(self, 'daemon_app'):
            import threading

            # Join every non-main thread with a short timeout so shutdown
            # cannot hang on a stuck worker.
            for thread in threading.enumerate()[::-1]:
                if thread.name == "MainThread":
                    continue
                try:
                    with Spinner(f"closing Thread {thread.name:^50}|", symbols="s", count_down=True,
                                 time_in_s=0.751 if not self.debug else 0.6):
                        thread.join(timeout=0.751 if not self.debug else 0.6)
                except TimeoutError as e:
                    self.logger.error(f"Timeout error on exit {thread.name} {str(e)}")
                    print(str(e), f"Timeout {thread.name}")
                except KeyboardInterrupt:
                    print("Unsave Exit")
                    break
        if hasattr(self, 'loop') and self.loop is not None:
            with Spinner("closing Event loop:", symbols="+"):
                self.loop.stop()

    async def a_exit(self):
        """Run queued async exit hooks, then perform the synchronous shutdown."""
        await self.a_remove_all_modules(delete=True)
        # Only coroutine-function hooks are awaited here; sync hooks already
        # ran during module removal.
        pending = [asyncio.create_task(hook()) for hook in self.exit_tasks
                   if asyncio.iscoroutinefunction(hook)]
        for outcome in await asyncio.gather(*pending):
            self.print(f"Function On Exit result: {outcome}")
        self.exit(remove_all=False)

    def save_load(self, modname, spec='app'):
        """Load a module defensively.

        Returns the loaded module, or False when no name was given or the
        module could not be found.
        """
        self.logger.debug(f"Save load module {modname}")
        if not modname:
            self.logger.warning("no filename specified")
            return False

        result = False
        try:
            result = self.load_mod(modname, spec=spec)
        except ModuleNotFoundError as e:
            self.logger.error(Style.RED(f"Module {modname} not found"))
            self.debug_rains(e)
        return result

    def get_function(self, name: Enum or tuple, **kwargs):
        """
        Resolve a registered function by Enum or (module, function) tuple.

        Kwargs for _get_function
            metadata:: return the registered function dictionary
                stateless: (function_data, None), 0
                stateful: (function_data, higher_order_function), 0
            state::boolean
                specification::str default app
        """
        if isinstance(name, tuple):
            return self._get_function(None, as_str=name, **kwargs)
        return self._get_function(name, **kwargs)

    async def a_run_function(self, mod_function_name: Enum or tuple,
                             tb_run_function_with_state=True,
                             tb_run_with_specification='app',
                             args_=None,
                             kwargs_=None,
                             *args,
                             **kwargs) -> Result:
        """
        Resolve and execute a registered function asynchronously.

        Args:
            mod_function_name: Target as a (module, function) tuple/list or Enum.
            tb_run_function_with_state: Resolve the stateful (instance-bound)
                variant when True.
            tb_run_with_specification: Instance specification; the special value
                'ws_internal' dispatches to a registered WebSocket handler
                instead of a regular function.
            args_: Fallback positional arguments, used only when *args is empty.
            kwargs_: Fallback keyword arguments, used only when **kwargs is empty.

        Returns:
            Result: The function's Result, or an error Result when the target
            cannot be resolved.
        """
        if kwargs_ is not None and not kwargs:
            kwargs = kwargs_
        if args_ is not None and not args:
            args = args_
        if isinstance(mod_function_name, tuple):
            modular_name, function_name = mod_function_name
        elif isinstance(mod_function_name, list):
            modular_name, function_name = mod_function_name[0], mod_function_name[1]
        elif isinstance(mod_function_name, Enum):
            modular_name, function_name = mod_function_name.__class__.NAME.value, mod_function_name.value
        else:
            raise TypeError("Unknown function type")

        if tb_run_with_specification == 'ws_internal':
            modular_name = modular_name.split('/')[0]
            if not self.mod_online(modular_name, installed=True):
                self.get_mod(modular_name)
            handler_id, event_name = mod_function_name
            if handler_id in self.websocket_handlers and event_name in self.websocket_handlers[handler_id]:
                handler_func = self.websocket_handlers[handler_id][event_name]
                try:
                    # Execute the handler, awaiting it when it is a coroutine.
                    if inspect.iscoroutinefunction(handler_func):
                        await handler_func(self, **kwargs)
                    else:
                        handler_func(self, **kwargs)  # synchronous handler
                    return Result.ok(info=f"WS handler '{event_name}' executed.")
                except Exception as e:
                    self.logger.error(f"Error in WebSocket handler '{handler_id}/{event_name}': {e}", exc_info=True)
                    return Result.default_internal_error(info=str(e))
            else:
                # No handler registered — not an error (e.g. on_connect is optional).
                return Result.ok(info=f"No WS handler for '{event_name}'.")

        if not self.mod_online(modular_name, installed=True):
            self.get_mod(modular_name)

        function_data, error_code = self.get_function(mod_function_name, state=tb_run_function_with_state,
                                                      metadata=True, specification=tb_run_with_specification)
        self.logger.info(f"Received fuction : {mod_function_name}, with execode: {error_code}")
        if error_code == 404:
            # Module may not be loaded yet: load it (awaiting async init) and retry once.
            mod = self.get_mod(modular_name)
            if hasattr(mod, "async_initialized") and not mod.async_initialized:
                await mod
            function_data, error_code = self.get_function(mod_function_name, state=tb_run_function_with_state,
                                                          metadata=True, specification=tb_run_with_specification)

        if error_code == 404:
            self.logger.warning(Style.RED("Function Not Found"))
            return (Result.default_user_error(interface=self.interface_type,
                                              exec_code=404,
                                              info="function not found function is not decorated").
                    set_origin(mod_function_name))

        if error_code == 300:
            return Result.default_internal_error(interface=self.interface_type,
                                                 info=f"module {modular_name}"
                                                      f" has no state (instance)").set_origin(mod_function_name)

        if error_code != 0:
            return Result.default_internal_error(interface=self.interface_type,
                                                 exec_code=error_code,
                                                 info=f"Internal error"
                                                      f" {modular_name}."
                                                      f"{function_name}").set_origin(mod_function_name)

        # Unpack the (function_data, function) metadata tuple from get_function.
        if not tb_run_function_with_state:
            function_data, _ = function_data
            function = function_data.get('func')
        else:
            function_data, function = function_data

        if not function:
            self.logger.warning(Style.RED(f"Function {function_name} not found"))
            return Result.default_internal_error(interface=self.interface_type,
                                                 exec_code=404,
                                                 info="function not found function").set_origin(mod_function_name)

        self.logger.info("Profiling function")
        t0 = time.perf_counter()
        if asyncio.iscoroutinefunction(function):
            return await self.a_fuction_runner(function, function_data, args, kwargs, t0)
        else:
            return self.fuction_runner(function, function_data, args, kwargs, t0)


    def run_function(self, mod_function_name: Enum or tuple,
                     tb_run_function_with_state=True,
                     tb_run_with_specification='app',
                     args_=None,
                     kwargs_=None,
                     *args,
                     **kwargs) -> Result:
        """Resolve and synchronously execute a registered module function.

        ``mod_function_name`` may be a ``(module, function)`` tuple/list or an
        Enum whose class carries the module name in ``NAME``.  ``args_`` and
        ``kwargs_`` act as fallbacks when ``*args``/``**kwargs`` are empty.
        Async functions are rejected here; use ``a_run_function``/``a_run_any``.

        Returns a :class:`Result`; lookup/state failures are mapped to
        user/internal error results with the origin set to the target name.
        """

        # args_/kwargs_ only apply when no positional/keyword args were given.
        if kwargs_ is not None and not kwargs:
            kwargs = kwargs_
        if args_ is not None and not args:
            args = args_
        # Normalise the target reference into (module_name, function_name).
        if isinstance(mod_function_name, tuple):
            modular_name, function_name = mod_function_name
        elif isinstance(mod_function_name, list):
            modular_name, function_name = mod_function_name[0], mod_function_name[1]
        elif isinstance(mod_function_name, Enum):
            modular_name, function_name = mod_function_name.__class__.NAME.value, mod_function_name.value
        else:
            raise TypeError("Unknown function type")

        # Make sure the module is loaded before looking the function up.
        if not self.mod_online(modular_name, installed=True):
            self.get_mod(modular_name)

        # WebSocket events are dispatched through the handler registry,
        # bypassing the normal function table.
        if tb_run_with_specification == 'ws_internal':
            handler_id, event_name = mod_function_name
            if handler_id in self.websocket_handlers and event_name in self.websocket_handlers[handler_id]:
                handler_func = self.websocket_handlers[handler_id][event_name]
                try:
                    # Execute the (possibly async) handler.
                    # NOTE(review): run_until_complete fails if self.loop is
                    # already running -- confirm ws_internal is only used from
                    # a non-async context.
                    if inspect.iscoroutinefunction(handler_func):
                        return self.loop.run_until_complete(handler_func(self, **kwargs))
                    else:
                        handler_func(self, **kwargs)  # for synchronous handlers
                    return Result.ok(info=f"WS handler '{event_name}' executed.")
                except Exception as e:
                    self.logger.error(f"Error in WebSocket handler '{handler_id}/{event_name}': {e}", exc_info=True)
                    return Result.default_internal_error(info=str(e))
            else:
                # No handler registered -- not an error (e.g. on_connect is optional).
                return Result.ok(info=f"No WS handler for '{event_name}'.")

        function_data, error_code = self.get_function(mod_function_name, state=tb_run_function_with_state,
                                                      metadata=True, specification=tb_run_with_specification)
        self.logger.info(f"Received fuction : {mod_function_name}, with execode: {error_code}")
        # Retry once after (re)loading the module (codes 1/3/400 -- presumably
        # "module not loaded"; confirm against get_function).
        if error_code == 1 or error_code == 3 or error_code == 400:
            self.get_mod(modular_name)
            function_data, error_code = self.get_function(mod_function_name, state=tb_run_function_with_state,
                                                          metadata=True, specification=tb_run_with_specification)

        if error_code == 2:
            self.logger.warning(Style.RED("Function Not Found"))
            return (Result.default_user_error(interface=self.interface_type,
                                              exec_code=404,
                                              info="function not found function is not decorated").
                    set_origin(mod_function_name))

        if error_code == -1:
            return Result.default_internal_error(interface=self.interface_type,
                                                 info=f"module {modular_name}"
                                                      f" has no state (instance)").set_origin(mod_function_name)

        if error_code != 0:
            return Result.default_internal_error(interface=self.interface_type,
                                                 exec_code=error_code,
                                                 info=f"Internal error"
                                                      f" {modular_name}."
                                                      f"{function_name}").set_origin(mod_function_name)

        # get_function returned a 2-tuple: (metadata, instance-bound callable).
        # Without state, the plain function is taken from the metadata instead.
        if not tb_run_function_with_state:
            function_data, _ = function_data
            function = function_data.get('func')
        else:
            function_data, function = function_data

        if not function:
            self.logger.warning(Style.RED(f"Function {function_name} not found"))
            return Result.default_internal_error(interface=self.interface_type,
                                                 exec_code=404,
                                                 info="function not found function").set_origin(mod_function_name)

        self.logger.info("Profiling function")
        t0 = time.perf_counter()
        if asyncio.iscoroutinefunction(function):
            # This sync entry point cannot await; callers must use a_run_any.
            raise ValueError(f"Fuction {function_name} is Async use a_run_any")
        else:
            return self.fuction_runner(function, function_data, args, kwargs, t0)

    def run_a_from_sync(self, function, *args, **kwargs):
        """Execute the coroutine function *function* from synchronous code.

        Lazily binds ``self.loop``.  When that loop is already running, the
        coroutine is executed on a temporary event loop inside a helper
        thread (blocking until it finishes); otherwise it is driven to
        completion directly on ``self.loop``.  Returns the coroutine's
        result and re-raises any exception it produced.
        """
        # Lazily attach an event loop to this instance.
        if self.loop is None:
            try:
                self.loop = asyncio.get_running_loop()
            except RuntimeError:
                self.loop = asyncio.new_event_loop()

        if not self.loop.is_running():
            # Idle loop: schedule the coroutine and drive it to completion.
            task = self.loop.create_task(function(*args, **kwargs))
            return self.loop.run_until_complete(task)

        # The loop is busy -- run the coroutine on a fresh loop inside a
        # dedicated thread and hand the outcome back through a Future.
        outcome = Future()

        def _drive():
            side_loop = asyncio.new_event_loop()
            asyncio.set_event_loop(side_loop)
            try:
                outcome.set_result(side_loop.run_until_complete(function(*args, **kwargs)))
            except Exception as exc:
                outcome.set_exception(exc)
            finally:
                side_loop.close()

        worker = threading.Thread(target=_drive)
        worker.start()
        worker.join()  # Block until the helper thread completes.
        return outcome.result()

    def fuction_runner(self, function, function_data: dict, args: list, kwargs: dict, t0=.0):
        """Invoke *function* with args/kwargs matched against its registered
        signature and normalise the outcome to a :class:`Result`.

        ``function_data`` supplies the registry metadata (``params``,
        ``module_name``, ``func_name``, ``row`` flag); ``t0`` is the
        perf-counter start used for the timing log.  Exceptions are converted
        to internal-error results instead of being propagated.
        """

        parameters = function_data.get('params')
        modular_name = function_data.get('module_name')
        function_name = function_data.get('func_name')
        row = function_data.get('row')
        mod_function_name = f"{modular_name}.{function_name}"

        # 'self' in the registered params is filled by the bound instance, so
        # it is excluded from the arity comparisons below.
        if_self_state = 1 if 'self' in parameters else 0

        try:
            # Heuristic dispatch: call with whichever form matches the arity.
            if len(parameters) == 0:
                res = function()
            elif len(parameters) == len(args) + if_self_state:
                res = function(*args)
            elif len(parameters) == len(kwargs.keys()) + if_self_state:
                res = function(**kwargs)
            else:
                res = function(*args, **kwargs)
            self.logger.info(f"Execution done in {time.perf_counter()-t0:.4f}")
            if isinstance(res, Result):
                formatted_result = res
                if formatted_result.origin is None:
                    formatted_result.set_origin(mod_function_name)
            elif isinstance(res, ApiResult):
                formatted_result = res
                if formatted_result.origin is None:
                    # NOTE(review): the chained return value is discarded here;
                    # this only works if set_origin mutates in place -- confirm.
                    formatted_result.as_result().set_origin(mod_function_name).to_api_result()
            elif row:
                # Raw mode: pass the value through unwrapped.
                formatted_result = res
            else:
                # Wrap the result in a Result object
                formatted_result = Result.ok(
                    interface=self.interface_type,
                    data_info="Auto generated result",
                    data=res,
                    info="Function executed successfully"
                ).set_origin(mod_function_name)
            if not row:
                self.logger.info(
                    f"Function Exec code: {formatted_result.info.exec_code} Info's: {formatted_result.info.help_text}")
            else:
                self.logger.info(
                    f"Function Exec data: {formatted_result}")
        except Exception as e:
            self.logger.error(
                Style.YELLOW(Style.Bold(
                    f"! Function ERROR: in {modular_name}.{function_name}")))
            # Wrap the exception in a Result object
            formatted_result = Result.default_internal_error(info=str(e)).set_origin(mod_function_name)
            # res = formatted_result
            self.logger.error(
                f"Function {modular_name}.{function_name}"
                f" executed wit an error {str(e)}, {type(e)}")
            self.debug_rains(e)
            self.print(f"! Function ERROR: in {modular_name}.{function_name} ")



        else:
            self.print_ok()

            self.logger.info(
                f"Function {modular_name}.{function_name}"
                f" executed successfully")

        return formatted_result

    async def a_fuction_runner(self, function, function_data: dict, args: list, kwargs: dict, t0=.0):
        """Async counterpart of :meth:`fuction_runner`.

        Awaits *function* with args/kwargs matched against its registered
        signature and normalises the outcome to a :class:`Result`.
        Exceptions are converted to internal-error results instead of being
        propagated.
        """

        parameters = function_data.get('params')
        modular_name = function_data.get('module_name')
        function_name = function_data.get('func_name')
        row = function_data.get('row')
        mod_function_name = f"{modular_name}.{function_name}"

        # 'self' in the registered params is filled by the bound instance, so
        # it is excluded from the arity comparisons below.
        if_self_state = 1 if 'self' in parameters else 0

        try:
            # Heuristic dispatch: await with whichever form matches the arity.
            if len(parameters) == 0:
                res = await function()
            elif len(parameters) == len(args) + if_self_state:
                res = await function(*args)
            elif len(parameters) == len(kwargs.keys()) + if_self_state:
                res = await function(**kwargs)
            else:
                res = await function(*args, **kwargs)
            self.logger.info(f"Execution done in {time.perf_counter()-t0:.4f}")
            if isinstance(res, Result):
                formatted_result = res
                if formatted_result.origin is None:
                    formatted_result.set_origin(mod_function_name)
            elif isinstance(res, ApiResult):
                formatted_result = res
                if formatted_result.origin is None:
                    # NOTE(review): the chained return value is discarded here;
                    # this only works if set_origin mutates in place -- confirm.
                    formatted_result.as_result().set_origin(mod_function_name).to_api_result()
            elif row:
                # Raw mode: pass the value through unwrapped.
                formatted_result = res
            else:
                # Wrap the result in a Result object
                formatted_result = Result.ok(
                    interface=self.interface_type,
                    data_info="Auto generated result",
                    data=res,
                    info="Function executed successfully"
                ).set_origin(mod_function_name)
            if not row:
                self.logger.info(
                    f"Function Exec code: {formatted_result.info.exec_code} Info's: {formatted_result.info.help_text}")
            else:
                self.logger.info(
                    f"Function Exec data: {formatted_result}")
        except Exception as e:
            self.logger.error(
                Style.YELLOW(Style.Bold(
                    f"! Function ERROR: in {modular_name}.{function_name}")))
            # Wrap the exception in a Result object
            formatted_result = Result.default_internal_error(info=str(e)).set_origin(mod_function_name)
            # res = formatted_result
            self.logger.error(
                f"Function {modular_name}.{function_name}"
                f" executed wit an error {str(e)}, {type(e)}")
            self.debug_rains(e)

        else:
            self.print_ok()

            self.logger.info(
                f"Function {modular_name}.{function_name}"
                f" executed successfully")

        return formatted_result

    async def run_http(self, mod_function_name: Enum or str or tuple, function_name=None,
                       args_=None,
                       kwargs_=None, method="GET",
                       *args, **kwargs):
        """Call a module function on the remote session over HTTP.

        Builds ``/api/<module>/<function>`` from the same flexible target
        spelling as :meth:`run_function` and sends ``kwargs`` as the request
        payload.  The response is decoded as JSON, then YAML, then XML; as a
        last resort the raw text is wrapped in an internal-error dict.

        NOTE(review): ``args_`` is appended verbatim as the query string, so
        it must already be an encoded string -- confirm with callers.
        """
        if kwargs_ is not None and not kwargs:
            kwargs = kwargs_
        if args_ is not None and not args:
            args = args_

        modular_name = mod_function_name
        function_name = function_name

        # Two plain strings -> treat them as (module, function).
        if isinstance(mod_function_name, str) and isinstance(function_name, str):
            mod_function_name = (mod_function_name, function_name)

        if isinstance(mod_function_name, tuple):
            modular_name, function_name = mod_function_name
        elif isinstance(mod_function_name, list):
            modular_name, function_name = mod_function_name[0], mod_function_name[1]
        elif isinstance(mod_function_name, Enum):
            modular_name, function_name = mod_function_name.__class__.NAME.value, mod_function_name.value

        self.logger.info(f"getting function : {modular_name}.{function_name} from http {self.session.base}")
        r = await self.session.fetch(f"/api/{modular_name}/{function_name}{'?' + args_ if args_ is not None else ''}",
                                     data=kwargs, method=method)
        try:
            if not r:
                print("§ Session server Offline!", self.session.base)
                return Result.default_internal_error(info="Session fetch failed").as_dict()

            content_type = r.headers.get('Content-Type', '').lower()

            if 'application/json' in content_type:
                try:
                    return r.json()
                except Exception as e:
                    print(f"⚠ JSON decode error: {e}")
                    # Fallback to text if JSON decoding fails
                    text = r.text
            else:
                text = r.text

            # r.text may itself be a (possibly async) callable depending on
            # the session backend; resolve it to a string first.
            if isinstance(text, Callable):
                if asyncio.iscoroutinefunction(text):
                    text = await text()
                else:
                    text = text()

            # Attempt YAML
            if 'yaml' in content_type or text.strip().startswith('---'):
                try:
                    import yaml
                    return yaml.safe_load(text)
                except Exception as e:
                    print(f"⚠ YAML decode error: {e}")

            # Attempt XML
            if 'xml' in content_type or text.strip().startswith('<?xml'):
                try:
                    import xmltodict
                    return xmltodict.parse(text)
                except Exception as e:
                    print(f"⚠ XML decode error: {e}")

            # Fallback: return plain text wrapped in an internal-error dict
            return Result.default_internal_error(data={'raw_text': text, 'content_type': content_type}).as_dict()

        except Exception as e:
            print("❌ Fatal error during API call:", e)
            self.debug_rains(e)
            return Result.default_internal_error(str(e)).as_dict()

    def run_local(self, *args, **kwargs):
        """Alias for :meth:`run_any`: execute a function in the local process."""
        runner = self.run_any
        return runner(*args, **kwargs)

    async def a_run_local(self, *args, **kwargs):
        """Async alias for :meth:`a_run_any`: execute a function locally."""
        runner = self.a_run_any
        return await runner(*args, **kwargs)

    def run_any(self, mod_function_name: Enum or str or tuple, backwords_compability_variabel_string_holder=None,
                get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                kwargs_=None,
                *args, **kwargs):
        """Main synchronous entry point: run a function by flexible reference.

        A dotted string like ``"Mod.func"`` is split into module and function
        for backwards compatibility.  Background tasks attached to the result
        are scheduled via ``run_bg_task``.  By default the unwrapped payload
        (``res.get()``) is returned; pass ``get_results=True`` to keep the
        full :class:`Result`.
        """

        # if self.debug:
        #     self.logger.info(f'Called from: {getouterframes(currentframe(), 2)}')

        if kwargs_ is not None and not kwargs:
            kwargs = kwargs_
        if args_ is not None and not args:
            args = args_

        # Legacy "Module.function" single-string spelling.
        if isinstance(mod_function_name, str) and backwords_compability_variabel_string_holder is None:
            backwords_compability_variabel_string_holder = mod_function_name.split('.')[-1]
            mod_function_name = mod_function_name.replace(f".{backwords_compability_variabel_string_holder}", "")

        if isinstance(mod_function_name, str) and isinstance(backwords_compability_variabel_string_holder, str):
            mod_function_name = (mod_function_name, backwords_compability_variabel_string_holder)

        res: Result = self.run_function(mod_function_name,
                                        tb_run_function_with_state=tb_run_function_with_state,
                                        tb_run_with_specification=tb_run_with_specification,
                                        args_=args, kwargs_=kwargs).as_result()
        # NOTE(review): as_result() was already called above, so this second
        # conversion looks redundant -- confirm ApiResult.as_result semantics.
        if isinstance(res, ApiResult):
            res = res.as_result()

        if isinstance(res, Result) and res.bg_task is not None:
            self.run_bg_task(res.bg_task)

        if self.debug:
            res.log(show_data=False)

        if not get_results and isinstance(res, Result):
            return res.get()

        if get_results and not isinstance(res, Result):
            return Result.ok(data=res)

        return res

    async def a_run_any(self, mod_function_name: Enum or str or tuple,
                        backwords_compability_variabel_string_holder=None,
                        get_results=False, tb_run_function_with_state=True, tb_run_with_specification='app', args_=None,
                        kwargs_=None,
                        *args, **kwargs):
        """Async counterpart of :meth:`run_any`.

        Accepts the same flexible target spellings, awaits
        ``a_run_function``, schedules any attached background task, and
        returns the unwrapped payload unless ``get_results=True``.
        """

        # if self.debug:
        #     self.logger.info(f'Called from: {getouterframes(currentframe(), 2)}')

        if kwargs_ is not None and not kwargs:
            kwargs = kwargs_
        if args_ is not None and not args:
            args = args_

        # Legacy "Module.function" single-string spelling.
        if isinstance(mod_function_name, str) and backwords_compability_variabel_string_holder is None:
            backwords_compability_variabel_string_holder = mod_function_name.split('.')[-1]
            mod_function_name = mod_function_name.replace(f".{backwords_compability_variabel_string_holder}", "")

        if isinstance(mod_function_name, str) and isinstance(backwords_compability_variabel_string_holder, str):
            mod_function_name = (mod_function_name, backwords_compability_variabel_string_holder)

        res: Result = await self.a_run_function(mod_function_name,
                                                tb_run_function_with_state=tb_run_function_with_state,
                                                tb_run_with_specification=tb_run_with_specification,
                                                args_=args, kwargs_=kwargs)
        if isinstance(res, ApiResult):
            res = res.as_result()

        if isinstance(res, Result) and res.bg_task is not None:
            self.run_bg_task(res.bg_task)

        if self.debug:
            res.print()
            res.log(show_data=False) if isinstance(res, Result) else self.logger.debug(res)
        if not get_results and isinstance(res, Result):
            return res.get()

        if get_results and not isinstance(res, Result):
            return Result.ok(data=res)

        return res


    def web_context(self):
        """Return the helper HTML shell used for web rendering.

        The file ``./dist/helper.html`` is read once and cached on
        ``self._web_context``; on any read error a small fallback snippet is
        cached instead, so repeated failures do not re-hit the filesystem.
        """
        if self._web_context is None:
            try:
                # Context manager guarantees the handle is closed (the
                # previous open(...).read() leaked the file handle).
                with open("./dist/helper.html", encoding="utf-8") as f:
                    self._web_context = f.read()
            except Exception as e:
                self.logger.error(f"Could not load web context: {e}")
                self._web_context = "<div><h1>Web Context not found</h1></div>"
        return self._web_context

    def get_mod(self, name, spec='app') -> ModuleType or MainToolType:
        """Fetch (and lazily load) the instance of module *name* for *spec*.

        Loads the module via ``save_load`` when it is not yet registered and
        raises ``ValueError`` if that fails.  Returns the spec-bound
        instance, creating it through ``load_mod`` on first access, or
        ``None`` when the module cannot be found at all.
        """
        if spec != "app":
            self.print(f"Getting Module {name} spec: {spec}")

        # Not registered yet -> try to load it from disk first.
        if name not in self.functions:
            loaded = self.save_load(name, spec=spec)
            if loaded is False or (isinstance(loaded, Result) and loaded.is_error()):
                self.logger.warning(f"Could not find {name} in {list(self.functions.keys())}")
                raise ValueError(f"Could not find {name} in {list(self.functions.keys())} pleas install the module, or its posibly broken use --debug for infos")

        if name not in self.functions:
            self.logger.warning(f"Module '{name}' is not found")
            return None

        instance = self.functions[name].get(f"{spec}_instance")
        if instance is None:
            # First access under this spec -> build the instance now.
            return self.load_mod(name, spec=spec)
        return instance

    def print(self, text="", *args, **kwargs):
        """Print *text* to stdout, prefixed with the styled system id.

        Output is suppressed entirely for "live" app ids.  ``flush``
        defaults to True and is forwarded to the underlying print calls.
        """
        if 'live' in self.id:
            return  # live instances stay silent

        flush = kwargs.pop('flush', True)
        # sprint(None) acts as an "is printing enabled" probe.
        show_prefix = self.sprint(None)
        if show_prefix:
            print(Style.CYAN(f"System${self.id}:"), end=" ", flush=flush)
        print(text, *args, **kwargs, flush=flush)

    def sprint(self, text="", *args, **kwargs):
        """Stream-print *text* with the styled system-id prefix.

        Calling with ``text=None`` produces no output and returns True
        (used by :meth:`print` as an "is printing enabled" probe).  Live
        app ids suppress all output.  Non-empty plain strings with no extra
        keyword arguments are rendered through ``stram_print``; anything
        else falls back to the builtin ``print``.
        """
        if text is None:
            return True  # probe mode: report that printing is enabled
        if 'live' in self.id:
            return

        flush = kwargs.pop('flush', True)
        print(Style.CYAN(f"System${self.id}:"), end=" ", flush=flush)

        streamable = isinstance(text, str) and kwargs == {} and text
        if streamable:
            stram_print(text + ' '.join(args))
            print()
        else:
            print(text, *args, **kwargs, flush=flush)

    # ----------------------------------------------------------------
    # Decorators for the toolbox

    def reload_mod(self, mod_name, spec='app', is_file=True, loc="toolboxv2.mods."):
        """Hot-reload module *mod_name* and re-instantiate it.

        Removes the module, re-executes its source (unless the module opts
        out via a truthy ``reload_save`` attribute), recursively reloads
        package submodules when *is_file* is False, clears its registered
        on_exit/on_start hooks, and finally rebuilds the instance through
        ``inplace_load_instance``.
        """
        self.remove_mod(mod_name, delete=True)
        if mod_name not in self.modules:
            self.logger.warning(f"Module '{mod_name}' is not found")
            return
        # Modules flagged reload_save skip source re-execution entirely.
        if hasattr(self.modules[mod_name], 'reload_save') and self.modules[mod_name].reload_save:
            def reexecute_module_code(x):
                return x
        else:
            def reexecute_module_code(module_name):
                # Re-run a module's source inside its existing namespace so
                # new definitions replace the old ones in place.
                if isinstance(module_name, str):
                    module = import_module(module_name)
                else:
                    module = module_name
                # Get the source code of the module
                try:
                    source = inspect.getsource(module)
                except Exception:
                    # print(f"No source for {str(module_name).split('from')[0]}: {e}")
                    return module
                # Compile the source code
                try:
                    code = compile(source, module.__file__, 'exec')
                    # Execute the code in the module's namespace
                    exec(code, module.__dict__)
                except Exception:
                    # print(f"No source for {str(module_name).split('from')[0]}: {e}")
                    pass
                return module

        if not is_file:
            # Package module: walk and reload every submodule first.
            mods = self.get_all_mods("./mods/" + mod_name)
            def recursive_reload(package_name):
                package = import_module(package_name)

                # First, reload all submodules
                if hasattr(package, '__path__'):
                    for _finder, name, _ispkg in pkgutil.walk_packages(package.__path__, package.__name__ + "."):
                        try:
                            mod = import_module(name)
                            reexecute_module_code(mod)
                            reload(mod)
                        except Exception as e:
                            print(f"Error reloading module {name}: {e}")
                            break

                # Finally, reload the package itself
                reexecute_module_code(package)
                reload(package)

            for mod in mods:
                if mod.endswith(".txt") or mod.endswith(".yaml"):
                    continue
                try:
                    recursive_reload(loc + mod_name + '.' + mod)
                    self.print(f"Reloaded {mod_name}.{mod}")
                except ImportError:
                    self.print(f"Could not load {mod_name}.{mod}")
        reexecute_module_code(self.modules[mod_name])
        # Drop stale lifecycle hooks; they are re-registered on instantiation.
        if mod_name in self.functions:
            if "on_exit" in self.functions[mod_name]:
                self.functions[mod_name]["on_exit"] = []
            if "on_start" in self.functions[mod_name]:
                self.functions[mod_name]["on_start"] = []
        self.inplace_load_instance(mod_name, spec=spec, mfo=reload(self.modules[mod_name]) if mod_name in self.modules else None)

    def watch_mod(self, mod_name, spec='app', loc="toolboxv2.mods.", use_thread=True, path_name=None, on_reload=None):
        """Watch a module's source on disk and hot-reload it on change.

        Watches ``mods/<path_name>`` (single file or package directory) via
        ``watchfiles`` and calls :meth:`reload_mod` for every change batch,
        followed by the optional *on_reload* callback.  Runs in a daemon
        thread unless *use_thread* is False, in which case it blocks.
        """
        if path_name is None:
            path_name = mod_name
        is_file = os.path.isfile(self.start_dir + '/mods/' + path_name + '.py')
        import watchfiles

        def watch_loop():
            # Single files get a '.py' suffix; packages are watched as a dir.
            paths = f'mods/{path_name}' + ('.py' if is_file else '')
            self.logger.info(f'Watching Path: {paths}')
            try:
                for changes in watchfiles.watch(paths):
                    if not changes:
                        continue
                    self.reload_mod(mod_name, spec, is_file, loc)
                    if on_reload:
                        on_reload()
            except FileNotFoundError:
                self.logger.warning(f"Path {paths} not found")

        if use_thread:
            threading.Thread(target=watch_loop, daemon=True).start()
        else:
            watch_loop()

    def _register_function(self, module_name, func_name, data):
        if module_name not in self.functions:
            self.functions[module_name] = {}
        if func_name in self.functions[module_name]:
            self.print(f"Overriding function {func_name} from {module_name}", end="\r")
            self.functions[module_name][func_name] = data
        else:
            self.functions[module_name][func_name] = data

    def _create_decorator(self, type_: str,
                          name: str = "",
                          mod_name: str = "",
                          level: int = -1,
                          restrict_in_virtual_mode: bool = False,
                          api: bool = False,
                          helper: str = "",
                          version: str or None = None,
                          initial: bool=False,
                          exit_f: bool=False,
                          test: bool=True,
                          samples:list[dict[str, Any]] | None=None,
                          state:bool | None=None,
                          pre_compute:Callable | None=None,
                          post_compute:Callable[[], Result] | None=None,
                          api_methods:list[str] | None=None,
                          memory_cache: bool=False,
                          file_cache: bool=False,
                          request_as_kwarg: bool=False,
                          row: bool=False,
                          memory_cache_max_size:int=100,
                          memory_cache_ttl:int=300,
                          websocket_handler: str | None = None,
                          ):

        if isinstance(type_, Enum):
            type_ = type_.value

        if memory_cache and file_cache:
            raise ValueError("Don't use both cash at the same time for the same fuction")

        use_cache = memory_cache or file_cache
        cache = {}
        if file_cache:
            cache = FileCache(folder=self.data_dir + f'\\cache\\{mod_name}\\',
                              filename=self.data_dir + f'\\cache\\{mod_name}\\{name}cache.db')
        if memory_cache:
            cache = MemoryCache(maxsize=memory_cache_max_size, ttl=memory_cache_ttl)

        version = self.version if version is None else self.version + ':' + version

        def a_additional_process(func):
            # Async wrapper factory: adds the optional pre/post hooks,
            # Result normalisation and the memory/file cache layer.

            async def executor(*args, **kwargs):

                # Optional hook that may rewrite the call arguments.
                if pre_compute is not None:
                    args, kwargs = await pre_compute(*args, **kwargs)
                if asyncio.iscoroutinefunction(func):
                    result = await func(*args, **kwargs)
                else:
                    result = func(*args, **kwargs)
                # Optional hook that may rewrite the result.
                if post_compute is not None:
                    result = await post_compute(result)
                if row:
                    # Raw mode: hand back the value untouched.
                    return result
                if not isinstance(result, Result):
                    result = Result.ok(data=result)
                if result.origin is None:
                    result.set_origin((mod_name if mod_name else func.__module__.split('.')[-1]
                                       , name if name else func.__name__
                                       , type_))
                if result.result.data_to == ToolBoxInterfaces.native.name:
                    result.result.data_to = ToolBoxInterfaces.remote if api else ToolBoxInterfaces.native
                # Apply the to_api_result method to the result, if available
                if api and hasattr(result, 'to_api_result'):
                    return result.to_api_result()
                return result

            @wraps(func)
            async def wrapper(*args, **kwargs):

                # Cache-less fast path.
                if not use_cache:
                    return await executor(*args, **kwargs)

                # Cache key from module, function and call arguments; fall
                # back to a bytes() rendering when str(args) raises.
                try:
                    cache_key = (f"{mod_name if mod_name else func.__module__.split('.')[-1]}"
                                 f"-{func.__name__}-{str(args)},{str(kwargs.items())}")
                except ValueError:
                    cache_key = (f"{mod_name if mod_name else func.__module__.split('.')[-1]}"
                                 f"-{func.__name__}-{bytes(args)},{str(kwargs.items())}")

                result = cache.get(cache_key)
                if result is not None:
                    return result

                result = await executor(*args, **kwargs)

                cache.set(cache_key, result)

                return result

            return wrapper

        def additional_process(func):
            # Sync wrapper factory, mirroring a_additional_process: adds the
            # optional pre/post hooks, Result normalisation and the
            # memory/file cache layer for non-coroutine functions.

            def executor(*args, **kwargs):

                # Optional hook that may rewrite the call arguments.
                if pre_compute is not None:
                    args, kwargs = pre_compute(*args, **kwargs)
                # NOTE: this wrapper is only applied to non-coroutine
                # functions; the previous iscoroutinefunction check had two
                # identical branches (a sync wrapper cannot await) and was
                # removed as dead code.
                result = func(*args, **kwargs)
                # Optional hook that may rewrite the result.
                if post_compute is not None:
                    result = post_compute(result)
                if row:
                    # Raw mode: hand back the value untouched.
                    return result
                if not isinstance(result, Result):
                    result = Result.ok(data=result)
                if result.origin is None:
                    result.set_origin((mod_name if mod_name else func.__module__.split('.')[-1]
                                       , name if name else func.__name__
                                       , type_))
                if result.result.data_to == ToolBoxInterfaces.native.name:
                    result.result.data_to = ToolBoxInterfaces.remote if api else ToolBoxInterfaces.native
                # Apply the to_api_result method to the result, if available
                if api and hasattr(result, 'to_api_result'):
                    return result.to_api_result()
                return result

            @wraps(func)
            def wrapper(*args, **kwargs):

                # Cache-less fast path.
                if not use_cache:
                    return executor(*args, **kwargs)

                # Cache key from module, function and call arguments; fall
                # back to a bytes() rendering when str(args) raises.
                try:
                    cache_key = (f"{mod_name if mod_name else func.__module__.split('.')[-1]}"
                                 f"-{func.__name__}-{str(args)},{str(kwargs.items())}")
                except ValueError:
                    cache_key = (f"{mod_name if mod_name else func.__module__.split('.')[-1]}"
                                 f"-{func.__name__}-{bytes(args)},{str(kwargs.items())}")

                result = cache.get(cache_key)
                if result is not None:
                    return result

                result = executor(*args, **kwargs)

                cache.set(cache_key, result)

                return result

            return wrapper

        def decorator(func):
            sig = signature(func)
            params = list(sig.parameters)
            module_name = mod_name if mod_name else func.__module__.split('.')[-1]
            func_name = name if name else func.__name__
            if func_name == 'on_start':
                func_name = 'on_startup'
            if func_name == 'on_exit':
                func_name = 'on_close'
            if api or pre_compute is not None or post_compute is not None or memory_cache or file_cache:
                if asyncio.iscoroutinefunction(func):
                    func = a_additional_process(func)
                else:
                    func = additional_process(func)
            if api and str(sig.return_annotation) == 'Result':
                raise ValueError(f"Fuction {module_name}.{func_name} registered as "
                                 f"Api fuction but uses {str(sig.return_annotation)}\n"
                                 f"Please change the sig from ..)-> Result to ..)-> ApiResult")
            data = {
                "type": type_,
                "module_name": module_name,
                "func_name": func_name,
                "level": level,
                "restrict_in_virtual_mode": restrict_in_virtual_mode,
                "func": func,
                "api": api,
                "helper": helper,
                "version": version,
                "initial": initial,
                "exit_f": exit_f,
                "api_methods": api_methods if api_methods is not None else ["AUTO"],
                "__module__": func.__module__,
                "signature": sig,
                "params": params,
                "row": row,
                "state": (
                    False if len(params) == 0 else params[0] in ['self', 'state', 'app']) if state is None else state,
                "do_test": test,
                "samples": samples,
                "request_as_kwarg": request_as_kwarg,

            }

            if websocket_handler:
                # Die dekorierte Funktion sollte ein Dict mit den Handlern zurückgeben
                try:
                    handler_config = func(self)  # Rufe die Funktion auf, um die Konfiguration zu erhalten
                    if not isinstance(handler_config, dict):
                        raise TypeError(
                            f"WebSocket handler function '{func.__name__}' must return a dictionary of handlers.")

                    # Handler-Identifikator, z.B. "ChatModule/room_chat"
                    handler_id = f"{module_name}/{websocket_handler}"
                    self.websocket_handlers[handler_id] = {}

                    for event_name, handler_func in handler_config.items():
                        if event_name in ["on_connect", "on_message", "on_disconnect"] and callable(handler_func):
                            self.websocket_handlers[handler_id][event_name] = handler_func
                        else:
                            self.logger.warning(f"Invalid WebSocket handler event '{event_name}' in '{handler_id}'.")

                    self.logger.info(f"Registered WebSocket handlers for '{handler_id}'.")

                except Exception as e:
                    self.logger.error(f"Failed to register WebSocket handlers for '{func.__name__}': {e}",
                                      exc_info=True)
            else:
                self._register_function(module_name, func_name, data)

            if exit_f:
                if "on_exit" not in self.functions[module_name]:
                    self.functions[module_name]["on_exit"] = []
                self.functions[module_name]["on_exit"].append(func_name)
            if initial:
                if "on_start" not in self.functions[module_name]:
                    self.functions[module_name]["on_start"] = []
                self.functions[module_name]["on_start"].append(func_name)

            return func

        decorator.tb_init = True

        return decorator

    def tb(self, name=None,
           mod_name: str = "",
           helper: str = "",
           version: str | None = None,
           test: bool = True,
           restrict_in_virtual_mode: bool = False,
           api: bool = False,
           initial: bool = False,
           exit_f: bool = False,
           test_only: bool = False,
           memory_cache: bool = False,
           file_cache: bool = False,
           request_as_kwarg: bool = False,
           row: bool = False,
           state: bool | None = None,
           level: int = -1,
           memory_cache_max_size: int = 100,
           memory_cache_ttl: int = 300,
           samples: list or dict or None = None,
           interface: ToolBoxInterfaces or None or str = None,
           pre_compute=None,
           post_compute=None,
           api_methods=None,
           websocket_handler: str | None = None,
           ):
        """
        A decorator for registering and configuring functions within a module.

        It wraps functions with additional behaviour such as caching, API
        conversion, and lifecycle management (initialization and exit), and
        registers the function in the module's function registry.

        Args:
            name (str, optional): Name to register the function under. Defaults to the function's own name.
            mod_name (str, optional): Name of the module the function belongs to.
            helper (str, optional): Helper string with additional information about the function.
            version (str or None, optional): Version of the function or module.
            test (bool, optional): Whether the function is for testing purposes.
            restrict_in_virtual_mode (bool, optional): Whether to restrict the function in virtual mode.
            api (bool, optional): Whether the function is part of an API.
            initial (bool, optional): Whether the function should be executed at initialization.
            exit_f (bool, optional): Whether the function should be executed at exit.
            test_only (bool, optional): Whether the function should only be used for testing.
            memory_cache (bool, optional): Enable in-memory caching for the function.
            file_cache (bool, optional): Enable file caching for the function.
            request_as_kwarg (bool, optional): Pass the incoming request as a keyword argument when the function is called from the API.
            row (bool, optional): Return the raw result instead of auto-wrapping it in a Result type (default False).
            state (bool or None, optional): Whether the function maintains state.
            level (int, optional): Level of the function, used for prioritization or categorization.
            memory_cache_max_size (int, optional): Maximum size of the memory cache.
            memory_cache_ttl (int, optional): Time-to-live for memory cache entries.
            samples (list or dict or None, optional): Samples or examples of function usage.
            interface (str, optional): Interface type for the function.
            pre_compute (callable, optional): Function called before the main function.
            post_compute (callable, optional): Function called after the main function.
            api_methods (list[str], optional): HTTP methods; default ["AUTO"] (GET if no params, POST if params), or GET, POST, PUT, DELETE.
            websocket_handler (str, optional): Name of the websocket handler to use.

        Returns:
            function: The decorated function with additional processing and registration capabilities.
        """
        if interface is None:
            interface = "tb"
        # Outside of test instances, test-only functions degrade to an
        # identity-like stub so they are never registered or executed.
        if test_only and 'test' not in self.id:
            return lambda *args, **kwargs: args
        decorator_options = {
            "level": level,
            "restrict_in_virtual_mode": restrict_in_virtual_mode,
            "helper": helper,
            "api": api,
            "version": version,
            "initial": initial,
            "exit_f": exit_f,
            "test": test,
            "samples": samples,
            "state": state,
            "pre_compute": pre_compute,
            "post_compute": post_compute,
            "memory_cache": memory_cache,
            "file_cache": file_cache,
            "request_as_kwarg": request_as_kwarg,
            "row": row,
            "api_methods": api_methods,
            "memory_cache_max_size": memory_cache_max_size,
            "memory_cache_ttl": memory_cache_ttl,
            "websocket_handler": websocket_handler,
        }
        return self._create_decorator(interface, name, mod_name, **decorator_options)

    def save_autocompletion_dict(self):
        """Persist an autocompletion mapping of all registered functions.

        Builds a nested mapping ``{module: {function: {param: None}}}`` from
        ``self.functions`` (entries whose metadata is not a dict are skipped)
        and stores its ``str()`` form under the "auto~~~~~~" key via the
        config file handler. Functions without parameters map to ``None``, and
        modules without any usable functions map to ``None`` as well.
        """
        autocompletion_dict = {}
        # Use the value yielded by items() directly instead of re-indexing
        # self.functions[module_name] on every inner iteration.
        for module_name, module_functions in self.functions.items():
            data = {}
            for function_name, function_data in module_functions.items():
                # Registry entries such as lifecycle lists ("on_exit") are not
                # function metadata dicts; skip them.
                if not isinstance(function_data, dict):
                    continue
                params = {arg: None for arg in function_data.get("params", [])}
                data[function_name] = params if params else None
            autocompletion_dict[module_name] = data if data else None
        self.config_fh.add_to_save_file_handler("auto~~~~~~", str(autocompletion_dict))

    def get_autocompletion_dict(self):
        """Load the autocompletion mapping stored under the "auto~~~~~~" key.

        Returns:
            Whatever the config file handler holds for that key — presumably
            the stringified dict written by ``save_autocompletion_dict``, or a
            handler-specific default when nothing was saved (TODO confirm the
            handler's miss behavior).
        """
        return self.config_fh.get_file_handler("auto~~~~~~")

    def save_registry_as_enums(self, directory: str, filename: str):
        """Export the function registry as generated Python Enum classes.

        One ``@dataclass``-decorated ``Enum`` class is emitted per registered
        module (transient "APP_INSTANCE*" registries are skipped); each member
        maps the upper-cased function name to its registry key, with a comment
        describing its parameters and return annotation.

        Args:
            directory: Target directory; created if it does not exist.
            filename: Name of the generated Python file.

        Raises:
            ValueError: If the generated content is shorter than the bare
                header, i.e. the registry produced no usable content.
        """
        # Create the target directory if needed (atomic w.r.t. concurrent
        # creation, unlike a separate exists() check).
        os.makedirs(directory, exist_ok=True)

        # Prepare the output path
        filepath = os.path.join(directory, filename)

        # Generate the enum classes as strings
        enum_classes = [f'"""Automatic generated by ToolBox v = {self.version}"""'
                        f'\nfrom enum import Enum\nfrom dataclasses import dataclass'
                        f'\n\n\n']
        for module, functions in self.functions.items():
            if module.startswith("APP_INSTANCE"):
                continue
            class_name = module
            enum_members = "\n    ".join(
                [
                    f"{func_name.upper().replace('-', '')}"
                    f" = '{func_name}' "
                    f"# Input: ({function_data['params'] if isinstance(function_data, dict) else ''}),"
                    f" Output: {function_data['signature'].return_annotation if isinstance(function_data, dict) else 'None'}"
                    for func_name, function_data in functions.items()])
            enum_class = (f'@dataclass\nclass {class_name.upper().replace(".", "_").replace("-", "")}(Enum):'
                          f"\n    NAME = '{class_name}'\n    {enum_members}")
            enum_classes.append(enum_class)

        # Write the enums to the file
        data = "\n\n\n".join(enum_classes)
        if len(data) < 12:
            raise ValueError(
                "Invalid enums: no content was generated. Please delete the file yourself "
                "(utils/system/all_functions_enums.py) or register more functions :}")
        with open(filepath, 'w') as file:
            file.write(data)

        print(Style.Bold(Style.BLUE(f"Enums gespeichert in {filepath}")))


    # WS logic

    def _set_rust_ws_bridge(self, bridge_object: Any):
        """
        Called from Rust to install the WebSocket communication bridge.
        It must NOT be invoked manually from Python.
        """
        self._rust_ws_bridge = bridge_object
        self.print(f"Rust WebSocket bridge has been set for instance {self.id}.")

    async def ws_send(self, conn_id: str, payload: dict):
        """
        Asynchronously send a message to a single WebSocket connection.

        Args:
            conn_id: Unique ID of the target connection.
            payload: Dictionary that is serialized to JSON before sending.
        """
        bridge = self._rust_ws_bridge
        if bridge is None:
            self.logger.error("Cannot send WebSocket message: Rust bridge is not initialized.")
            return

        try:
            # Delegate to the asynchronous Rust-side send and await completion.
            await bridge.send_message(conn_id, json.dumps(payload))
        except Exception as e:
            self.logger.error(f"Failed to send WebSocket message to {conn_id}: {e}", exc_info=True)

    async def ws_broadcast(self, channel_id: str, payload: dict, source_conn_id: str = "python_broadcast"):
        """
        Asynchronously broadcast a message to every client in a channel/room.

        Args:
            channel_id: The channel to broadcast to.
            payload: Dictionary that is serialized to JSON before sending.
            source_conn_id (optional): ID of the originating connection, used to avoid echoes.
        """
        bridge = self._rust_ws_bridge
        if bridge is None:
            self.logger.error("Cannot broadcast WebSocket message: Rust bridge is not initialized.")
            return

        try:
            # Delegate to the asynchronous Rust-side broadcast.
            await bridge.broadcast_message(channel_id, json.dumps(payload), source_conn_id)
        except Exception as e:
            self.logger.error(f"Failed to broadcast WebSocket message to channel {channel_id}: {e}", exc_info=True)
disconnect(*args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/toolbox.py
240
241
242
@staticmethod
def disconnect(*args, **kwargs):
    """proxi attr"""
exit_main(*args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/toolbox.py
228
229
230
@staticmethod
def exit_main(*args, **kwargs):
    """proxi attr"""
get_function(name, **kwargs)

Kwargs for _get_function metadata:: return the registered function dictionary stateless: (function_data, None), 0 stateful: (function_data, higher_order_function), 0 state::boolean specification::str default app

Source code in toolboxv2/utils/toolbox.py
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
def get_function(self, name: Enum or tuple, **kwargs):
    """
    Kwargs for _get_function
        metadata:: return the registered function dictionary
            stateless: (function_data, None), 0
            stateful: (function_data, higher_order_function), 0
        state::boolean
            specification::str default app
    """
    if isinstance(name, tuple):
        return self._get_function(None, as_str=name, **kwargs)
    else:
        return self._get_function(name, **kwargs)
hide_console(*args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/toolbox.py
232
233
234
@staticmethod
def hide_console(*args, **kwargs):
    """proxi attr"""
init_mod(mod_name, spec='app')

Initializes a module in a thread-safe manner by submitting the asynchronous initialization to the running event loop.

Source code in toolboxv2/utils/toolbox.py
621
622
623
624
625
626
627
628
def init_mod(self, mod_name, spec='app'):
    """
    Initializes a module in a thread-safe manner by submitting the
    asynchronous initialization to the running event loop.
    """
    if '.' in mod_name:
        mod_name = mod_name.split('.')[0]
    self.run_bg_task(self.a_init_mod, mod_name, spec)
run(*args, request=None, running_function_coro=None, **kwargs)

Run a function with support for SSE streaming in both threaded and non-threaded contexts.

Source code in toolboxv2/utils/toolbox.py
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
def run(self, *args, request=None, running_function_coro=None, **kwargs):
    """
    Run a function with support for SSE streaming in both
    threaded and non-threaded contexts.
    """
    if running_function_coro is None:
        mn, fn = args[0]
        if self.functions.get(mn, {}).get(fn, {}).get('request_as_kwarg', False):
            kwargs["request"] = RequestData.from_dict(request)
            if 'data' in kwargs and 'data' not in self.functions.get(mn, {}).get(fn, {}).get('params', []):
                kwargs["request"].data = kwargs["request"].body = kwargs['data']
                del kwargs['data']
            if 'form_data' in kwargs and 'form_data' not in self.functions.get(mn, {}).get(fn, {}).get('params',
                                                                                                       []):
                kwargs["request"].form_data = kwargs["request"].body = kwargs['form_data']
                del kwargs['form_data']

    # Create the coroutine
    coro = running_function_coro or self.a_run_any(*args, **kwargs)

    # Get or create an event loop
    try:
        loop = asyncio.get_event_loop()
        is_running = loop.is_running()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        is_running = False

    # If the loop is already running, run in a separate thread
    if is_running:
        # Create thread pool executor as needed
        if not hasattr(self.__class__, '_executor'):
            self.__class__._executor = ThreadPoolExecutor(max_workers=4)

        def run_in_new_thread():
            # Set up a new loop in this thread
            new_loop = asyncio.new_event_loop()
            asyncio.set_event_loop(new_loop)

            try:
                # Run the coroutine
                return new_loop.run_until_complete(coro)
            finally:
                new_loop.close()

        # Run in thread and get result
        thread_result = self.__class__._executor.submit(run_in_new_thread).result()

        # Handle streaming results from thread
        if isinstance(thread_result, dict) and thread_result.get("is_stream"):
            # Create a new SSE stream in the main thread
            async def stream_from_function():
                # Re-run the function with direct async access
                stream_result = await self.a_run_any(*args, **kwargs)

                if (isinstance(stream_result, Result) and
                    getattr(stream_result.result, 'data_type', None) == "stream"):
                    # Get and forward data from the original generator
                    original_gen = stream_result.result.data.get("generator")
                    if inspect.isasyncgen(original_gen):
                        async for item in original_gen:
                            yield item

            # Return a new streaming Result
            return Result.stream(
                stream_generator=stream_from_function(),
                headers=thread_result.get("headers", {})
            )

        result = thread_result
    else:
        # Direct execution when loop is not running
        result = loop.run_until_complete(coro)

    # Process the final result
    if isinstance(result, Result):
        if 'debug' in self.id:
            result.print()
        if getattr(result.result, 'data_type', None) == "stream":
            return result
        return result.to_api_result().model_dump(mode='json')

    return result
run_bg_task(task, *args, **kwargs)

Runs a coroutine in the background without blocking the caller.

This is the primary method for "fire-and-forget" async tasks. It schedules the coroutine to run on the application's main event loop.

Parameters:

Name Type Description Default
task Callable

The coroutine function to run.

required
*args

Arguments to pass to the coroutine function.

()
**kwargs

Keyword arguments to pass to the coroutine function.

{}

Returns:

Type Description
Task | None

An asyncio.Task object representing the scheduled task, or None if

Task | None

the task could not be scheduled.

Source code in toolboxv2/utils/toolbox.py
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
def run_bg_task(self, task: Callable, *args, **kwargs) -> asyncio.Task | None:
    """
    Runs a coroutine in the background without blocking the caller.

    This is the primary method for "fire-and-forget" async tasks. It schedules
    the coroutine to run on the application's main event loop.

    Args:
        task: The coroutine function to run.
        *args: Arguments to pass to the coroutine function.
        **kwargs: Keyword arguments to pass to the coroutine function.

    Returns:
        An asyncio.Task object representing the scheduled task, or None if
        the task could not be scheduled.
    """
    if not callable(task):
        self.logger.warning("Task passed to run_bg_task is not callable!")
        return None

    if not asyncio.iscoroutinefunction(task) and not asyncio.iscoroutine(task):
        self.logger.warning(f"Task '{getattr(task, '__name__', 'unknown')}' is not a coroutine. "
                            f"Use run_bg_task_advanced for synchronous functions.")
        # Fallback to advanced runner for convenience
        self.run_bg_task_advanced(task, *args, **kwargs)
        return None

    try:
        loop = self.loop_gard()
        if not loop.is_running():
            # If the main loop isn't running, we can't create a task on it.
            # This scenario is handled by run_bg_task_advanced.
            self.logger.info("Main event loop not running. Delegating to advanced background runner.")
            return self.run_bg_task_advanced(task, *args, **kwargs)

        # Create the coroutine if it's a function
        coro = task(*args, **kwargs) if asyncio.iscoroutinefunction(task) else task

        # Create a task on the running event loop
        bg_task = loop.create_task(coro)

        # Add a callback to log exceptions from the background task
        def _log_exception(the_task: asyncio.Task):
            if not the_task.cancelled() and the_task.exception():
                self.logger.error(f"Exception in background task '{the_task.get_name()}':",
                                  exc_info=the_task.exception())

        bg_task.add_done_callback(_log_exception)
        self.bg_tasks.append(bg_task)
        return bg_task

    except Exception as e:
        self.logger.error(f"Failed to schedule background task: {e}", exc_info=True)
        return None
run_bg_task_advanced(task, *args, **kwargs)

Runs a task in a separate, dedicated background thread with its own event loop.

This is ideal for: 1. Running an async task from a synchronous context. 2. Launching a long-running, independent operation that should not interfere with the main application's event loop.

Parameters:

Name Type Description Default
task Callable

The function to run (can be sync or async).

required
*args

Arguments for the task.

()
**kwargs

Keyword arguments for the task.

{}

Returns:

Type Description
Thread

The threading.Thread object managing the background execution.

Source code in toolboxv2/utils/toolbox.py
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
def run_bg_task_advanced(self, task: Callable, *args, **kwargs) -> threading.Thread:
    """
    Runs a task in a separate, dedicated background thread with its own event loop.

    This is ideal for:
    1. Running an async task from a synchronous context.
    2. Launching a long-running, independent operation that should not
       interfere with the main application's event loop.

    Args:
        task: The function to run (can be sync or async).
        *args: Arguments for the task.
        **kwargs: Keyword arguments for the task.

    Returns:
        The threading.Thread object managing the background execution.
    """
    if not callable(task):
        self.logger.warning("Task for run_bg_task_advanced is not callable!")
        return None

    def thread_target():
        # Each thread gets its own event loop.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

        try:
            # Prepare the coroutine we need to run
            if asyncio.iscoroutinefunction(task):
                coro = task(*args, **kwargs)
            elif asyncio.iscoroutine(task):
                # It's already a coroutine object
                coro = task
            else:
                # It's a synchronous function, run it in an executor
                # to avoid blocking the new event loop.
                coro = loop.run_in_executor(None, lambda: task(*args, **kwargs))

            # Run the coroutine to completion
            result = loop.run_until_complete(coro)
            self.logger.debug(f"Advanced background task '{getattr(task, '__name__', 'unknown')}' completed.")
            if result is not None:
                self.logger.debug(f"Task result: {str(result)[:100]}")

        except Exception as e:
            self.logger.error(f"Error in advanced background task '{getattr(task, '__name__', 'unknown')}':",
                              exc_info=e)
        finally:
            # Cleanly shut down the event loop in this thread.
            try:
                all_tasks = asyncio.all_tasks(loop=loop)
                if all_tasks:
                    for t in all_tasks:
                        t.cancel()
                    loop.run_until_complete(asyncio.gather(*all_tasks, return_exceptions=True))
            finally:
                loop.close()
                asyncio.set_event_loop(None)

    # Create, start, and return the thread.
    # It's a daemon thread so it won't prevent the main app from exiting.
    t = threading.Thread(target=thread_target, daemon=True, name=f"BGTask-{getattr(task, '__name__', 'unknown')}")
    self.bg_tasks.append(t)
    t.start()
    return t
show_console(*args, **kwargs) staticmethod

Proxy attribute.

Source code in toolboxv2/utils/toolbox.py
236
237
238
@staticmethod
def show_console(*args, **kwargs):
    """proxi attr"""
tb(name=None, mod_name='', helper='', version=None, test=True, restrict_in_virtual_mode=False, api=False, initial=False, exit_f=False, test_only=False, memory_cache=False, file_cache=False, request_as_kwarg=False, row=False, state=None, level=-1, memory_cache_max_size=100, memory_cache_ttl=300, samples=None, interface=None, pre_compute=None, post_compute=None, api_methods=None, websocket_handler=None)

A decorator for registering and configuring functions within a module.

This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

Parameters:

Name Type Description Default
name str

The name to register the function under. Defaults to the function's own name.

None
mod_name str

The name of the module the function belongs to.

''
helper str

A helper string providing additional information about the function.

''
version str or None

The version of the function or module.

None
test bool

Flag to indicate if the function is for testing purposes.

True
restrict_in_virtual_mode bool

Flag to restrict the function in virtual mode.

False
api bool

Flag to indicate if the function is part of an API.

False
initial bool

Flag to indicate if the function should be executed at initialization.

False
exit_f bool

Flag to indicate if the function should be executed at exit.

False
test_only bool

Flag to indicate if the function should only be used for testing.

False
memory_cache bool

Flag to enable memory caching for the function.

False
request_as_kwarg bool

Flag to pass the incoming request as a keyword argument when the function is called from the API.

False
file_cache bool

Flag to enable file caching for the function.

False
row bool

Whether to return the raw result instead of auto-wrapping it in a Result type. Default False (the result is wrapped).

False
state bool or None

Flag to indicate if the function maintains state.

None
level int

The level of the function, used for prioritization or categorization.

-1
memory_cache_max_size int

Maximum size of the memory cache.

100
memory_cache_ttl int

Time-to-live for the memory cache entries.

300
samples list or dict or None

Samples or examples of function usage.

None
interface str

The interface type for the function.

None
pre_compute callable

A function to be called before the main function.

None
post_compute callable

A function to be called after the main function.

None
api_methods list[str]

default ["AUTO"] (GET if not params, POST if params) , GET, POST, PUT or DELETE.

None
websocket_handler str

The name of the websocket handler to use.

None

Returns:

Name Type Description
function

The decorated function with additional processing and registration capabilities.

Source code in toolboxv2/utils/toolbox.py
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
def tb(self, name=None,
       mod_name: str = "",
       helper: str = "",
       version: str | None = None,
       test: bool = True,
       restrict_in_virtual_mode: bool = False,
       api: bool = False,
       initial: bool = False,
       exit_f: bool = False,
       test_only: bool = False,
       memory_cache: bool = False,
       file_cache: bool = False,
       request_as_kwarg: bool = False,
       row: bool = False,
       state: bool | None = None,
       level: int = -1,
       memory_cache_max_size: int = 100,
       memory_cache_ttl: int = 300,
       samples: list or dict or None = None,
       interface: ToolBoxInterfaces or None or str = None,
       pre_compute=None,
       post_compute=None,
       api_methods=None,
       websocket_handler: str | None = None,
       ):
    """
A decorator for registering and configuring functions within a module.

This decorator is used to wrap functions with additional functionality such as caching, API conversion, and lifecycle management (initialization and exit). It also handles the registration of the function in the module's function registry.

Args:
    name (str, optional): The name to register the function under. Defaults to the function's own name.
    mod_name (str, optional): The name of the module the function belongs to.
    helper (str, optional): A helper string providing additional information about the function.
    version (str or None, optional): The version of the function or module.
    test (bool, optional): Flag to indicate if the function is for testing purposes.
    restrict_in_virtual_mode (bool, optional): Flag to restrict the function in virtual mode.
    api (bool, optional): Flag to indicate if the function is part of an API.
    initial (bool, optional): Flag to indicate if the function should be executed at initialization.
    exit_f (bool, optional): Flag to indicate if the function should be executed at exit.
    test_only (bool, optional): Flag to indicate if the function should only be used for testing.
    memory_cache (bool, optional): Flag to enable memory caching for the function.
    request_as_kwarg (bool, optional): Flag to get request if the fuction is calld from api.
    file_cache (bool, optional): Flag to enable file caching for the function.
    row (bool, optional): rather to auto wrap the result in Result type default False means no row data aka result type
    state (bool or None, optional): Flag to indicate if the function maintains state.
    level (int, optional): The level of the function, used for prioritization or categorization.
    memory_cache_max_size (int, optional): Maximum size of the memory cache.
    memory_cache_ttl (int, optional): Time-to-live for the memory cache entries.
    samples (list or dict or None, optional): Samples or examples of function usage.
    interface (str, optional): The interface type for the function.
    pre_compute (callable, optional): A function to be called before the main function.
    post_compute (callable, optional): A function to be called after the main function.
    api_methods (list[str], optional): default ["AUTO"] (GET if not params, POST if params) , GET, POST, PUT or DELETE.
    websocket_handler (str, optional): The name of the websocket handler to use.

Returns:
    function: The decorated function with additional processing and registration capabilities.
"""
    if interface is None:
        interface = "tb"
    if test_only and 'test' not in self.id:
        return lambda *args, **kwargs: args
    return self._create_decorator(interface,
                                  name,
                                  mod_name,
                                  level=level,
                                  restrict_in_virtual_mode=restrict_in_virtual_mode,
                                  helper=helper,
                                  api=api,
                                  version=version,
                                  initial=initial,
                                  exit_f=exit_f,
                                  test=test,
                                  samples=samples,
                                  state=state,
                                  pre_compute=pre_compute,
                                  post_compute=post_compute,
                                  memory_cache=memory_cache,
                                  file_cache=file_cache,
                                  request_as_kwarg=request_as_kwarg,
                                  row=row,
                                  api_methods=api_methods,
                                  memory_cache_max_size=memory_cache_max_size,
                                  memory_cache_ttl=memory_cache_ttl,
                                  websocket_handler=websocket_handler,
                                  )
wait_for_bg_tasks(timeout=None)

Wait for all background tasks to complete.

Parameters:

Name Type Description Default
timeout

Maximum time to wait (in seconds) for all tasks to complete. None means wait indefinitely.

None

Returns:

Name Type Description
bool

True if all tasks completed, False if timeout occurred

Source code in toolboxv2/utils/toolbox.py
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
def wait_for_bg_tasks(self, timeout=None):
    """
    Wait for all background tasks to complete.

    The timeout is an overall budget shared across all tasks. (The previous
    implementation passed the same timeout to each join, so the total wait
    could grow to ``len(tasks) * timeout`` seconds.)

    Args:
        timeout: Maximum total time to wait (in seconds) for all tasks to
                 complete. None means wait indefinitely.

    Returns:
        bool: True if all tasks completed, False if the timeout elapsed
              before every task finished.
    """
    import time  # local import keeps the function self-contained

    active_tasks = [t for t in self.bg_tasks if t.is_alive()]

    if timeout is None:
        # No budget: block on each task until it is done.
        for task in active_tasks:
            task.join()
        return all(not t.is_alive() for t in active_tasks)

    deadline = time.monotonic() + timeout
    for task in active_tasks:
        remaining = deadline - time.monotonic()
        if remaining <= 0:
            # Budget exhausted before reaching this task.
            return False
        task.join(timeout=remaining)
        if task.is_alive():
            return False

    return True
ws_broadcast(channel_id, payload, source_conn_id='python_broadcast') async

Sendet eine Nachricht asynchron an alle Clients in einem Kanal/Raum.

Parameters:

Name Type Description Default
channel_id str

Der Kanal, an den gesendet werden soll.

required
payload dict

Ein Dictionary, das als JSON gesendet wird.

required
source_conn_id optional

Die ID der ursprünglichen Verbindung, um Echos zu vermeiden.

'python_broadcast'
Source code in toolboxv2/utils/toolbox.py
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
async def ws_broadcast(self, channel_id: str, payload: dict, source_conn_id: str = "python_broadcast"):
    """
    Asynchronously send a message to every client in a channel/room.

    Args:
        channel_id: The channel to broadcast to.
        payload: A dictionary that is serialized to JSON before sending.
        source_conn_id (optional): ID of the originating connection, used to
            avoid echoing the message back to its sender.
    """
    bridge = self._rust_ws_bridge
    if bridge is None:
        self.logger.error("Cannot broadcast WebSocket message: Rust bridge is not initialized.")
        return

    message = json.dumps(payload)
    try:
        # Delegate to the asynchronous Rust broadcast implementation.
        await bridge.broadcast_message(channel_id, message, source_conn_id)
    except Exception as e:
        self.logger.error(f"Failed to broadcast WebSocket message to channel {channel_id}: {e}", exc_info=True)
ws_send(conn_id, payload) async

Sendet eine Nachricht asynchron an eine einzelne WebSocket-Verbindung.

Parameters:

Name Type Description Default
conn_id str

Die eindeutige ID der Zielverbindung.

required
payload dict

Ein Dictionary, das als JSON gesendet wird.

required
Source code in toolboxv2/utils/toolbox.py
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
async def ws_send(self, conn_id: str, payload: dict):
    """
    Asynchronously send a message to a single WebSocket connection.

    Args:
        conn_id: The unique ID of the target connection.
        payload: A dictionary that is serialized to JSON before sending.
    """
    bridge = self._rust_ws_bridge
    if bridge is None:
        self.logger.error("Cannot send WebSocket message: Rust bridge is not initialized.")
        return

    message = json.dumps(payload)
    try:
        # Await the asynchronous Rust send call until it completes.
        await bridge.send_message(conn_id, message)
    except Exception as e:
        self.logger.error(f"Failed to send WebSocket message to {conn_id}: {e}", exc_info=True)

toolboxv2.show_console(show=True)

Source code in toolboxv2/utils/extras/show_and_hide_console.py
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
def show_console(show=True):
    """Show or hide the (Windows) console window.

    Fixes over the previous version: the docstring was placed after the
    ``global`` statement (so it was a no-op string, not the function's
    docstring), and a bare ``except:`` swallowed every exception including
    KeyboardInterrupt/SystemExit.

    Args:
        show: If True, bring the console window up; if False, hide it.

    Returns:
        bool: True if the window state was toggled, False otherwise
              (including any failure, e.g. on non-Windows platforms).
    """
    global TBRUNNER_console_viabel
    try:
        if show and not TBRUNNER_console_viabel:
            # SW_SHOWNOACTIVATE (4): show the window without stealing focus.
            ctypes.windll.user32.ShowWindow(ctypes.windll.kernel32.GetConsoleWindow(), 4)
            TBRUNNER_console_viabel = True
            return True
        elif not show and TBRUNNER_console_viabel:
            # SW_HIDE (0): hide the window.
            ctypes.windll.user32.ShowWindow(ctypes.windll.kernel32.GetConsoleWindow(), 0)
            TBRUNNER_console_viabel = False
            return True
    except Exception:
        # ctypes.windll only exists on Windows; report failure, don't raise.
        print(f"Could not show_console {show=}")
        return False
    return False

Logging

toolboxv2.get_logger()

Source code in toolboxv2/utils/system/tb_logger.py
136
137
def get_logger() -> logging.Logger:
    """Return the logger registered under the current toolboxv2 logger name."""
    logger_name = loggerNameOfToolboxv2
    return logging.getLogger(logger_name)

toolboxv2.setup_logging(level, name=loggerNameOfToolboxv2, online_level=None, is_online=False, file_level=None, interminal=False, logs_directory='../logs', app_name='main')

Source code in toolboxv2/utils/system/tb_logger.py
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
def setup_logging(level: int, name=loggerNameOfToolboxv2, online_level=None, is_online=False, file_level=None,
                  interminal=False, logs_directory="../logs", app_name="main"):
    """Configure and return the toolboxv2 logger and its log-file base name.

    Args:
        level: Logging level for the logger (must be a standard stdlib level).
        name: Logger name; also stored into the module-global
              ``loggerNameOfToolboxv2``.
        online_level: Level for the socket handler; defaults to ``level``.
        is_online: If True, attach a SocketHandler using the host/port stored
                   in the Logs.info bookkeeping file.
        file_level: Level for the file handler; defaults to ``level``.
        interminal: If True, also attach a StreamHandler for terminal output.
        logs_directory: Directory holding log files and the Logs.info file.
        app_name: Prefix prepended to every formatted log record.

    Returns:
        tuple: ``(logger, filename)`` where ``filename`` is the base name
        used as a key in the Logs.info rotation bookkeeping.

    Raises:
        ValueError: If any given level is not a standard logging level.
    """
    global loggerNameOfToolboxv2

    # Per-handler levels fall back to the main level when not given.
    if not online_level:
        online_level = level

    if not file_level:
        file_level = level

    # Ensure the log directory and the Logs.info bookkeeping file exist.
    if not os.path.exists(logs_directory):
        os.makedirs(logs_directory, exist_ok=True)
    if not os.path.exists(logs_directory + "/Logs.info"):
        open(f"{logs_directory}/Logs.info", "a").close()

    loggerNameOfToolboxv2 = name

    available_log_levels = [logging.CRITICAL, logging.FATAL, logging.ERROR, logging.WARNING, logging.WARN, logging.INFO,
                            logging.DEBUG, logging.NOTSET]

    if level not in available_log_levels:
        raise ValueError(f"level must be one of {available_log_levels}, but logging level is {level}")

    if online_level not in available_log_levels:
        raise ValueError(f"online_level must be one of {available_log_levels}, but logging level is {online_level}")

    if file_level not in available_log_levels:
        raise ValueError(f"file_level must be one of {available_log_levels}, but logging level is {file_level}")

    log_date = datetime.datetime.today().strftime('%Y-%m-%d')
    log_levels = ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"]
    log_level_index = log_levels.index(logging.getLevelName(level))

    filename = f"Logs-{name}-{log_date}-{log_levels[log_level_index]}"
    # NOTE(review): the literal "(unknown)" here and below looks like a
    # doc-generation artifact that replaced a filename variable — verify
    # against the original source before relying on this path.
    log_filename = f"{logs_directory}/(unknown).log"

    # Default bookkeeping: rotation counter per filename plus socket host/port.
    log_info_data = {
        filename: 0,
        "H": "localhost",
        "P": 62435
    }

    # Load rotation counters and socket host/port from the bookkeeping file.
    with open(f"{logs_directory}/Logs.info") as li:
        log_info_data_str = li.read()
        try:
            # NOTE(review): eval() on file content is unsafe if Logs.info can
            # be written by untrusted parties; ast.literal_eval would be safer.
            log_info_data = eval(log_info_data_str)
        except SyntaxError:
            if log_info_data_str:
                print(Style.RED(Style.Bold("Could not parse log info data")))

        if filename not in log_info_data:
            log_info_data[filename] = 0

        if not os.path.exists(log_filename):
            log_info_data[filename] = 0
            print("new log file")

        if os.path.exists(log_filename):
            # Rotate: find the next free "#<n>" suffix and rename the old file.
            log_info_data[filename] += 1

            while os.path.exists(f"{logs_directory}/(unknown)#{log_info_data[filename]}.log"):
                log_info_data[filename] += 1

            try:
                os.rename(log_filename,
                          f"{logs_directory}/(unknown)#{log_info_data[filename]}.log")
            except PermissionError:
                print(Style.YELLOW(Style.Bold(f"Could not rename log file appending on (unknown)")))

    # Persist the counters; cap the file at a few entries plus host/port.
    with open(f"{logs_directory}/Logs.info", "w") as li:
        if len(log_info_data.keys()) >= 7:
            log_info_data = {
                filename: log_info_data[filename],
                "H": log_info_data["H"],
                "P": log_info_data["P"]
            }
        li.write(str(log_info_data))

    # Verify the log file is writable; fall back to a test file name if not.
    try:
        with open(log_filename, "a"):
            pass
    except OSError:
        log_filename = f"{logs_directory}/Logs-Test-{log_date}-{log_levels[log_level_index]}.log"
        with open(log_filename, "a"):
            pass

    logger = logging.getLogger(name)

    logger.setLevel(level)
    # Prevent logger from propagating to parent loggers
    logger.propagate = False

    terminal_format = f"{app_name} %(asctime)s %(levelname)s %(name)s - %(message)s"
    file_format = f"{app_name} %(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(funcName)s:%(lineno)d - %(message)s"

    # Configure handlers
    handlers = []

    # File handler (always added)
    file_handler = logging.FileHandler(log_filename)
    file_handler.setFormatter(logging.Formatter(file_format))
    file_handler.setLevel(file_level)
    handlers.append(file_handler)

    # Terminal handler (if requested)
    if interminal:
        terminal_handler = logging.StreamHandler()
        terminal_handler.setFormatter(logging.Formatter(terminal_format))
        terminal_handler.setLevel(level)
        handlers.append(terminal_handler)

    # Socket handler (if requested)
    if is_online:
        socket_handler = SocketHandler(log_info_data["H"], log_info_data["P"])
        socket_handler.setFormatter(logging.Formatter(file_format))
        socket_handler.setLevel(online_level)
        handlers.append(socket_handler)

    # Add all handlers to logger
    for handler in handlers:
        logger.addHandler(handler)

    return logger, filename

Styling & Console Output

toolboxv2.Style

Source code in toolboxv2/utils/extras/Style.py
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
class Style:
    """ANSI escape codes and helpers for styling terminal text.

    Each public wrapper method returns *text* enclosed in the corresponding
    ANSI code and the reset code. ``style_dic`` maps style names to raw
    codes and is consumed by ``remove_styles()`` and ``color_demo()``.
    """
    # Reset code and standard foreground colors.
    _END = '\33[0m'
    _BLACK = '\33[30m'
    _RED = '\33[31m'
    _GREEN = '\33[32m'
    _YELLOW = '\33[33m'
    _BLUE = '\33[34m'
    _MAGENTA = '\33[35m'
    _CYAN = '\33[36m'
    _WHITE = '\33[37m'

    # Text attributes.
    _Bold = '\33[1m'
    _ITALIC = '\33[3m'
    _Underline = '\33[4m'
    _BLINK = '\33[5m'
    _BLINK2 = '\33[6m'
    _Reversed = '\33[7m'

    # Standard background colors.
    _BLACKBG = '\33[40m'
    _REDBG = '\33[41m'
    _GREENBG = '\33[42m'
    _YELLOWBG = '\33[43m'
    _BLUEBG = '\33[44m'
    _VIOLETBG = '\33[45m'
    _BEIGEBG = '\33[46m'
    _WHITEBG = '\33[47m'

    # Bright foreground colors.
    _GREY = '\33[90m'
    _RED2 = '\33[91m'
    _GREEN2 = '\33[92m'
    _YELLOW2 = '\33[93m'
    _BLUE2 = '\33[94m'
    _VIOLET2 = '\33[95m'
    _BEIGE2 = '\33[96m'
    _WHITE2 = '\33[97m'

    # Bright background colors.
    _GREYBG = '\33[100m'
    _REDBG2 = '\33[101m'
    _GREENBG2 = '\33[102m'
    _YELLOWBG2 = '\33[103m'
    _BLUEBG2 = '\33[104m'
    _VIOLETBG2 = '\33[105m'
    _BEIGEBG2 = '\33[106m'
    _WHITEBG2 = '\33[107m'

    # Name -> raw code lookup (used by remove_styles() and color_demo()).
    style_dic = {
        "END": _END,
        "BLACK": _BLACK,
        "RED": _RED,
        "GREEN": _GREEN,
        "YELLOW": _YELLOW,
        "BLUE": _BLUE,
        "MAGENTA": _MAGENTA,
        "CYAN": _CYAN,
        "WHITE": _WHITE,
        "Bold": _Bold,
        "Underline": _Underline,
        "Reversed": _Reversed,

        "ITALIC": _ITALIC,
        "BLINK": _BLINK,
        "BLINK2": _BLINK2,
        "BLACKBG": _BLACKBG,
        "REDBG": _REDBG,
        "GREENBG": _GREENBG,
        "YELLOWBG": _YELLOWBG,
        "BLUEBG": _BLUEBG,
        "VIOLETBG": _VIOLETBG,
        "BEIGEBG": _BEIGEBG,
        "WHITEBG": _WHITEBG,
        "GREY": _GREY,
        "RED2": _RED2,
        "GREEN2": _GREEN2,
        "YELLOW2": _YELLOW2,
        "BLUE2": _BLUE2,
        "VIOLET2": _VIOLET2,
        "BEIGE2": _BEIGE2,
        "WHITE2": _WHITE2,
        "GREYBG": _GREYBG,
        "REDBG2": _REDBG2,
        "GREENBG2": _GREENBG2,
        "YELLOWBG2": _YELLOWBG2,
        "BLUEBG2": _BLUEBG2,
        "VIOLETBG2": _VIOLETBG2,
        "BEIGEBG2": _BEIGEBG2,
        "WHITEBG2": _WHITEBG2,

    }

    @staticmethod
    @text_save
    def END_():
        """Print the raw reset code (clears styling for subsequent output)."""
        print(Style._END)

    @staticmethod
    @text_save
    def GREEN_():
        """Print the raw green code (subsequent output renders green)."""
        print(Style._GREEN)

    # Each wrapper below returns *text* styled and terminated with the reset.
    @staticmethod
    @text_save
    def BLUE(text: str):
        return Style._BLUE + text + Style._END

    @staticmethod
    @text_save
    def BLACK(text: str):
        return Style._BLACK + text + Style._END

    @staticmethod
    @text_save
    def RED(text: str):
        return Style._RED + text + Style._END

    @staticmethod
    @text_save
    def GREEN(text: str):
        return Style._GREEN + text + Style._END

    @staticmethod
    @text_save
    def YELLOW(text: str):
        return Style._YELLOW + text + Style._END

    @staticmethod
    @text_save
    def MAGENTA(text: str):
        return Style._MAGENTA + text + Style._END

    @staticmethod
    @text_save
    def CYAN(text: str):
        return Style._CYAN + text + Style._END

    @staticmethod
    @text_save
    def WHITE(text: str):
        return Style._WHITE + text + Style._END

    @staticmethod
    @text_save
    def Bold(text: str):
        return Style._Bold + text + Style._END

    @staticmethod
    @text_save
    def Underline(text: str):
        return Style._Underline + text + Style._END

    @staticmethod
    @text_save
    def Underlined(text: str):
        # Alias of Underline.
        return Style._Underline + text + Style._END

    @staticmethod
    @text_save
    def Reversed(text: str):
        return Style._Reversed + text + Style._END

    @staticmethod
    @text_save
    def ITALIC(text: str):
        return Style._ITALIC + text + Style._END

    @staticmethod
    @text_save
    def BLINK(text: str):
        return Style._BLINK + text + Style._END

    @staticmethod
    @text_save
    def BLINK2(text: str):
        return Style._BLINK2 + text + Style._END

    @staticmethod
    @text_save
    def BLACKBG(text: str):
        return Style._BLACKBG + text + Style._END

    @staticmethod
    @text_save
    def REDBG(text: str):
        return Style._REDBG + text + Style._END

    @staticmethod
    @text_save
    def GREENBG(text: str):
        return Style._GREENBG + text + Style._END

    @staticmethod
    @text_save
    def YELLOWBG(text: str):
        return Style._YELLOWBG + text + Style._END

    @staticmethod
    @text_save
    def BLUEBG(text: str):
        return Style._BLUEBG + text + Style._END

    @staticmethod
    @text_save
    def VIOLETBG(text: str):
        return Style._VIOLETBG + text + Style._END

    @staticmethod
    @text_save
    def BEIGEBG(text: str):
        return Style._BEIGEBG + text + Style._END

    @staticmethod
    @text_save
    def WHITEBG(text: str):
        return Style._WHITEBG + text + Style._END

    @staticmethod
    @text_save
    def GREY(text: str):
        # Note: GREY casts to str, unlike the other color helpers.
        return Style._GREY + str(text) + Style._END

    @staticmethod
    @text_save
    def RED2(text: str):
        return Style._RED2 + text + Style._END

    @staticmethod
    @text_save
    def GREEN2(text: str):
        return Style._GREEN2 + text + Style._END

    @staticmethod
    @text_save
    def YELLOW2(text: str):
        return Style._YELLOW2 + text + Style._END

    @staticmethod
    @text_save
    def BLUE2(text: str):
        return Style._BLUE2 + text + Style._END

    @staticmethod
    @text_save
    def VIOLET2(text: str):
        return Style._VIOLET2 + text + Style._END

    @staticmethod
    @text_save
    def BEIGE2(text: str):
        return Style._BEIGE2 + text + Style._END

    @staticmethod
    @text_save
    def WHITE2(text: str):
        return Style._WHITE2 + text + Style._END

    @staticmethod
    @text_save
    def GREYBG(text: str):
        return Style._GREYBG + text + Style._END

    @staticmethod
    @text_save
    def REDBG2(text: str):
        return Style._REDBG2 + text + Style._END

    @staticmethod
    @text_save
    def GREENBG2(text: str):
        return Style._GREENBG2 + text + Style._END

    @staticmethod
    @text_save
    def YELLOWBG2(text: str):
        return Style._YELLOWBG2 + text + Style._END

    @staticmethod
    @text_save
    def BLUEBG2(text: str):
        return Style._BLUEBG2 + text + Style._END

    @staticmethod
    @text_save
    def VIOLETBG2(text: str):
        return Style._VIOLETBG2 + text + Style._END

    @staticmethod
    @text_save
    def BEIGEBG2(text: str):
        return Style._BEIGEBG2 + text + Style._END

    @staticmethod
    @text_save
    def WHITEBG2(text: str):
        return Style._WHITEBG2 + text + Style._END

    @staticmethod
    @text_save
    def loading_al(text: str):
        """Print one four-frame spinner cycle (/ - \\ |) next to *text*."""
        # Relies on module-level sleep() and cls() helpers.
        b = f"{text} /"
        print(b)
        sleep(0.05)
        cls()
        b = f"{text} -"
        print(b)
        sleep(0.05)
        cls()
        b = f"{text} \\"
        print(b)
        sleep(0.05)
        cls()
        b = f"{text} |"
        print(b)
        sleep(0.05)
        cls()

    @property
    def END(self):
        """The raw ANSI reset code."""
        return self._END

    def color_demo(self):
        """Print every style name with its effect applied."""
        for color in self.style_dic:
            print(f"{color} -> {self.style_dic[color]}Effect{self._END}")

    @property
    def Underline2(self):
        """The raw ANSI underline code."""
        return self._Underline

toolboxv2.Spinner

Enhanced Spinner with tqdm-like line rendering.

Source code in toolboxv2/utils/extras/Style.py
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
class Spinner:
    """
    Enhanced Spinner with tqdm-like line rendering.
    """
    # Named symbol sets selectable via the ``symbols`` constructor argument.
    SYMBOL_SETS = {
        "c": ["◐", "◓", "◑", "◒"],
        "b": ["▁", "▃", "▄", "▅", "▆", "▇", "█", "▇", "▆", "▅", "▄", "▃"],
        "d": ["⣾", "⣽", "⣻", "⢿", "⡿", "⣟", "⣯", "⣷"],
        "w": ["🌍", "🌎", "🌏"],
        "s": ["🌀   ", " 🌀  ", "  🌀 ", "   🌀", "  🌀 ", " 🌀  "],
        "+": ["+", "x"],
        "t": ["✶", "✸", "✹", "✺", "✹", "✷"]
    }

    def __init__(
        self,
        message: str = "Loading...",
        delay: float = 0.1,
        symbols=None,
        count_down: bool = False,
        time_in_s: float = 0
    ):
        """Initialize spinner with flexible configuration.

        Args:
            message: Text shown next to the spinner symbol.
            delay: Delay between animation frames, in seconds.
            symbols: A SYMBOL_SETS key, an explicit list of frames, or None
                for the default Braille frames.
            count_down: If True, display remaining time instead of elapsed.
            time_in_s: Total countdown time in seconds (with count_down).
        """
        # Resolve symbol set.
        if isinstance(symbols, str):
            symbols = self.SYMBOL_SETS.get(symbols, None)

        # Default symbols if not provided.
        if symbols is None:
            symbols = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]

        # Test mode symbol set.
        if 'unittest' in sys.argv[0]:
            symbols = ['#', '=', '-']

        self.spinner = itertools.cycle(symbols)
        self.delay = delay
        self.message = message
        self.running = False
        self.spinner_thread = None
        self.max_t = time_in_s
        self.contd = count_down

        # Rendering management.
        self._is_primary = False
        self._start_time = 0

        # Central manager.
        # NOTE(review): SpinnerManager appears to coordinate concurrent
        # spinners (primary vs. secondary rendering) — confirm semantics.
        self.manager = SpinnerManager()

    def _generate_render_line(self):
        """Generate the primary render line."""
        current_time = time.time()
        if self.contd:
            # Countdown: clamp at zero so the display never goes negative.
            remaining = max(0, self.max_t - (current_time - self._start_time))
            time_display = f"{remaining:.2f}"
        else:
            time_display = f"{current_time - self._start_time:.2f}"

        symbol = next(self.spinner)
        return f"{symbol} {self.message} | {time_display}"

    def _generate_secondary_info(self):
        """Generate secondary spinner info for additional spinners."""
        return f"{self.message}"

    def __enter__(self):
        """Start the spinner."""
        self.running = True
        self._start_time = time.time()
        self.manager.register_spinner(self)
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Stop the spinner."""
        self.running = False
        self.manager.unregister_spinner(self)
        # Clear the spinner's line if it was the primary spinner.
        if self._is_primary:
            sys.stdout.write("\r\033[K")
            sys.stdout.flush()

__enter__()

Start the spinner.

Source code in toolboxv2/utils/extras/Style.py
644
645
646
647
648
649
def __enter__(self):
    """Start the spinner and register it with the shared manager."""
    self.running = True
    self._start_time = time.time()
    # Rendering is driven by the manager, not by this object directly.
    self.manager.register_spinner(self)
    return self

__exit__(exc_type, exc_value, exc_traceback)

Stop the spinner.

Source code in toolboxv2/utils/extras/Style.py
651
652
653
654
655
656
657
658
def __exit__(self, exc_type, exc_value, exc_traceback):
    """Stop the spinner and unregister it from the shared manager."""
    self.running = False
    self.manager.unregister_spinner(self)
    # Clear the spinner's line if it was the primary spinner.
    if self._is_primary:
        sys.stdout.write("\r\033[K")
        sys.stdout.flush()

__init__(message='Loading...', delay=0.1, symbols=None, count_down=False, time_in_s=0)

Initialize spinner with flexible configuration.

Source code in toolboxv2/utils/extras/Style.py
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
def __init__(
    self,
    message: str = "Loading...",
    delay: float = 0.1,
    symbols=None,
    count_down: bool = False,
    time_in_s: float = 0
):
    """Initialize spinner with flexible configuration.

    Args:
        message: Text shown next to the spinner symbol.
        delay: Delay between animation frames, in seconds.
        symbols: A SYMBOL_SETS key, an explicit list of frames, or None.
        count_down: If True, display remaining time instead of elapsed.
        time_in_s: Total countdown time in seconds (used with count_down).
    """
    # Resolve symbol set.
    if isinstance(symbols, str):
        symbols = self.SYMBOL_SETS.get(symbols, None)

    # Default symbols if not provided.
    if symbols is None:
        symbols = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]

    # Test mode symbol set.
    if 'unittest' in sys.argv[0]:
        symbols = ['#', '=', '-']

    self.spinner = itertools.cycle(symbols)
    self.delay = delay
    self.message = message
    self.running = False
    self.spinner_thread = None
    self.max_t = time_in_s
    self.contd = count_down

    # Rendering management.
    self._is_primary = False
    self._start_time = 0

    # Central manager.
    self.manager = SpinnerManager()

toolboxv2.remove_styles(text, infos=False)

Source code in toolboxv2/utils/extras/Style.py
384
385
386
387
388
389
390
391
392
393
394
395
def remove_styles(text: str, infos=False):
    """Strip all known ANSI style codes from *text*.

    Args:
        text: The string to clean.
        infos: If True, also return the names of the styles that were
               removed (the "END" reset entry is excluded).

    Returns:
        The cleaned string, or a ``(cleaned_text, removed_style_names)``
        tuple when *infos* is True.
    """
    removed = []
    for key, style in Style.style_dic.items():
        if style in text:
            text = text.replace(style, '')
            if infos:
                # The loop already holds the matching key; the previous
                # implementation re-scanned style_dic to recover it.
                removed.append(key)
    if infos:
        if "END" in removed:
            removed.remove('END')
        return text, removed
    return text

Data Types & Structures

toolboxv2.AppArgs

Source code in toolboxv2/utils/system/types.py
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
class AppArgs:
    """Default command-line/runtime argument container for the toolbox app.

    Defaults are class attributes; ``set`` overrides them per instance.
    """
    init = None
    init_file = 'init.config'
    get_version = False
    mm = False
    sm = False
    lm = False
    modi = 'cli'
    kill = False
    remote = False
    remote_direct_key = None
    background_application = False
    background_application_runner = False
    docker = False
    build = False
    install = None
    remove = None
    update = None
    name = 'main'
    port = 5000
    host = '0.0.0.0'
    load_all_mod_in_files = False
    mods_folder = 'toolboxv2.mods.'
    debug = None
    test = None
    profiler = None
    hot_reload = False
    live_application = True
    sysPrint = False
    # NOTE(review): mutable class attribute — this dict is shared by every
    # instance that never reassigns it; confirm that is intended.
    kwargs = {}
    session = None

    def default(self):
        """Return self as the default argument set."""
        return self

    def set(self, name, value):
        """Set attribute *name* to *value*; returns self for chaining."""
        setattr(self, name, value)
        return self

toolboxv2.Result

Source code in toolboxv2/utils/system/types.py
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
class Result:
    """Unified result/response container for toolbox functions.

    Bundles an error state (``ToolBoxError``), a payload (``ToolBoxResult``)
    and execution metadata (``ToolBoxInfo``), plus an optional ``origin``
    marker identifying where the result came from.  Classmethod factories
    cover the common response shapes: ``ok``, ``error``, ``json``, ``text``,
    ``html``, ``binary``, ``file``, ``redirect``, ``stream`` and ``sse``.
    """

    # Optional background task attached via task(); exposed via the bg_task property.
    _task = None

    def __init__(self,
                 error: ToolBoxError,
                 result: ToolBoxResult,
                 info: ToolBoxInfo,
                 origin: Any | None = None,
                 ):
        self.error: ToolBoxError = error
        self.result: ToolBoxResult = result
        self.info: ToolBoxInfo = info
        self.origin = origin

    def as_result(self):
        """Return self; parity with ApiResult.as_result()."""
        return self

    def as_dict(self):
        """Serialize this result into a plain dict, collapsing enums to their values."""
        return {
            "error": self.error.value if isinstance(self.error, Enum) else self.error,
            "result": {
                "data_to": self.result.data_to.value if isinstance(self.result.data_to, Enum) else self.result.data_to,
                "data_info": self.result.data_info,
                "data": self.result.data,
                "data_type": self.result.data_type
            } if self.result else None,
            "info": {
                "exec_code": self.info.exec_code,  # exec_code maps to HTTP response codes
                "help_text": self.info.help_text
            } if self.info else None,
            "origin": self.origin
        }

    def set_origin(self, origin):
        """Set the origin once; raises ValueError if it was already set."""
        if self.origin is not None:
            raise ValueError("You cannot Change the origin of a Result!")
        self.origin = origin
        return self

    def set_dir_origin(self, name, extras="assets/"):
        """Set the origin to a module asset directory path (write-once)."""
        if self.origin is not None:
            raise ValueError("You cannot Change the origin of a Result!")
        self.origin = f"mods/{name}/{extras}"
        return self

    def is_error(self):
        """Return True when this result represents a failure.

        Nested Result payloads delegate to their own is_error();
        exec_code 0 and 200 both count as success.
        """
        if _test_is_result(self.result.data):
            return self.result.data.is_error()
        if self.error == ToolBoxError.none:
            return False
        if self.info.exec_code == 0:
            return False
        return self.info.exec_code != 200

    def is_ok(self):
        """Inverse of is_error()."""
        return not self.is_error()

    def is_data(self):
        """Return True when a payload is present."""
        return self.result.data is not None

    def to_api_result(self):
        """Convert this internal Result into the pydantic ApiResult wire format."""
        return ApiResult(
            error=self.error.value if isinstance(self.error, Enum) else self.error,
            result=ToolBoxResultBM(
                data_to=self.result.data_to.value if isinstance(self.result.data_to, Enum) else self.result.data_to,
                data_info=self.result.data_info,
                data=self.result.data,
                data_type=self.result.data_type
            ) if self.result else None,
            info=ToolBoxInfoBM(
                exec_code=self.info.exec_code,  # exec_code maps to HTTP response codes
                help_text=self.info.help_text
            ) if self.info else None,
            origin=self.origin
        )

    def task(self, task):
        """Attach a background task object; read it back via the bg_task property."""
        self._task = task
        return self

    @staticmethod
    def result_from_dict(error: str, result: dict, info: dict, origin: list | str | None):
        """Rebuild a Result from its dict parts, using '404' placeholders for missing fields."""
        return ApiResult(
            error=error if isinstance(error, Enum) else error,
            result=ToolBoxResultBM(
                data_to=result.get('data_to') if isinstance(result.get('data_to'), Enum) else result.get('data_to'),
                data_info=result.get('data_info', '404'),
                data=result.get('data'),
                data_type=result.get('data_type', '404'),
            ) if result else ToolBoxResultBM(
                data_to=ToolBoxInterfaces.cli.value,
                data_info='',
                data='404',
                data_type='404',
            ),
            info=ToolBoxInfoBM(
                exec_code=info.get('exec_code', 404),
                help_text=info.get('help_text', '404')
            ) if info else ToolBoxInfoBM(
                exec_code=404,
                help_text='404'
            ),
            origin=origin
        ).as_result()

    @classmethod
    def stream(cls,
               stream_generator: Any,  # Renamed from source for clarity
               content_type: str = "text/event-stream",  # Default to SSE
               headers: dict | None = None,
               info: str = "OK",
               interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
               cleanup_func: Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None = None):
        """
        Create a streaming response Result. Handles SSE and other stream types.

        Args:
            stream_generator: Any stream source (async generator, sync generator, iterable, or single item).
            content_type: Content-Type header (default: text/event-stream for SSE).
            headers: Additional HTTP headers for the response.
            info: Help text for the result.
            interface: Interface to send data to.
            cleanup_func: Optional function for cleanup.

        Returns:
            A Result object configured for streaming.
        """
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=0, help_text=info)

        final_generator: AsyncGenerator[str, None]

        if content_type == "text/event-stream":
            # For SSE, always use SSEGenerator.create_sse_stream to wrap the source.
            # SSEGenerator.create_sse_stream handles various types of stream_generator internally.
            final_generator = SSEGenerator.create_sse_stream(source=stream_generator, cleanup_func=cleanup_func)

            # Standard SSE headers for the HTTP response itself.
            # These are stored in the Result object; the Rust side decides how to use them.
            standard_sse_headers = {
                "Cache-Control": "no-cache",  # SSE specific
                "Connection": "keep-alive",  # SSE specific
                "X-Accel-Buffering": "no",  # Useful for proxies with SSE
                # Content-Type is implicitly text/event-stream, will be in streaming_data below
            }
            all_response_headers = standard_sse_headers.copy()
            if headers:
                all_response_headers.update(headers)
        else:
            # For non-SSE streams: wrap sync generators/iterables to be async,
            # pass async generators through, and turn single items into a
            # one-shot async generator.
            if inspect.isgenerator(stream_generator) or \
                (not isinstance(stream_generator, str) and hasattr(stream_generator, '__iter__')):
                final_generator = SSEGenerator.wrap_sync_generator(stream_generator)  # Simple async wrapper
            elif inspect.isasyncgen(stream_generator):
                final_generator = stream_generator
            else:  # Single item or string
                async def _single_item_gen():
                    yield stream_generator

                final_generator = _single_item_gen()
            all_response_headers = headers if headers else {}

        # Prepare streaming data to be stored in the Result object
        streaming_data = {
            "type": "stream",  # Indicator for Rust side
            "generator": final_generator,
            "content_type": content_type,  # Let Rust know the intended content type
            "headers": all_response_headers  # Intended HTTP headers for the overall response
        }

        result_payload = ToolBoxResult(
            data_to=interface,
            data=streaming_data,
            data_info="Streaming response" if content_type != "text/event-stream" else "SSE Event Stream",
            data_type="stream"  # Generic type for Rust to identify it needs to stream from 'generator'
        )

        return cls(error=error, info=info_obj, result=result_payload)

    @classmethod
    def sse(cls,
            stream_generator: Any,
            info: str = "OK",
            interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
            cleanup_func: Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None = None,
            # http_headers: Optional[dict] = None # If we want to allow overriding default SSE HTTP headers
            ):
        """
        Create a Server-Sent Events (SSE) streaming response Result.

        Args:
            stream_generator: A source yielding individual data items. This can be an
                              async generator, sync generator, iterable, or a single item.
                              Each item will be formatted as an SSE event.
            info: Optional help text for the Result.
            interface: Optional ToolBoxInterface to target.
            cleanup_func: Optional cleanup function to run when the stream ends or is cancelled.
            #http_headers: Optional dictionary of custom HTTP headers for the SSE response.

        Returns:
            A Result object configured for SSE streaming.
        """
        # Result.stream will handle calling SSEGenerator.create_sse_stream
        # and setting appropriate default headers for SSE when content_type is "text/event-stream".
        return cls.stream(
            stream_generator=stream_generator,
            content_type="text/event-stream",
            # headers=http_headers, # Pass if we add http_headers param
            info=info,
            interface=interface,
            cleanup_func=cleanup_func
        )

    @classmethod
    def default(cls, interface=ToolBoxInterfaces.native):
        """Create an empty placeholder Result (exec_code -1, no data)."""
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=-1, help_text="")
        result = ToolBoxResult(data_to=interface)
        return cls(error=error, info=info, result=result)

    @classmethod
    def json(cls, data, info="OK", interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None):
        """Create a JSON response Result."""
        error = ToolBoxError.none
        # status_code, when given, takes precedence over exec_code
        info_obj = ToolBoxInfo(exec_code=status_code or exec_code, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=data,
            data_info="JSON response",
            data_type="json"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def text(cls, text_data, content_type="text/plain",exec_code=None,status=200, info="OK", interface=ToolBoxInterfaces.remote, headers=None):
        """Create a text response Result with specific content type.

        When custom headers are supplied the response is routed through
        html() so the headers can be carried in a "special_html" payload.
        """
        if headers is not None:
            return cls.html(text_data, status= exec_code or status, info=info, headers=headers)
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=exec_code or status, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=text_data,
            data_info="Text response",
            data_type=content_type
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def binary(cls, data, content_type="application/octet-stream", download_name=None, info="OK",
               interface=ToolBoxInterfaces.remote):
        """Create a binary data response Result."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=0, help_text=info)

        # Create a dictionary with binary data and metadata
        binary_data = {
            "data": data,
            "content_type": content_type,
            "filename": download_name
        }

        result = ToolBoxResult(
            data_to=interface,
            data=binary_data,
            data_info=f"Binary response: {download_name}" if download_name else "Binary response",
            data_type="binary"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def file(cls, data, filename, content_type=None, info="OK", interface=ToolBoxInterfaces.remote):
        """Create a file download response Result.

        Args:
            data: File data as bytes or base64 string
            filename: Name of the file for download
            content_type: MIME type of the file (auto-detected if None)
            info: Response info text
            interface: Target interface

        Returns:
            Result object configured for file download

        Raises:
            ValueError: If data is neither bytes nor a (base64) string.
        """
        import base64
        import mimetypes

        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=200, help_text=info)

        # Auto-detect content type if not provided
        if content_type is None:
            content_type, _ = mimetypes.guess_type(filename)
            if content_type is None:
                content_type = "application/octet-stream"

        # Ensure data is base64 encoded string (as expected by Rust server)
        if isinstance(data, bytes):
            base64_data = base64.b64encode(data).decode('utf-8')
        elif isinstance(data, str):
            # Assume it's already base64 encoded
            base64_data = data
        else:
            raise ValueError("File data must be bytes or base64 string")

        result = ToolBoxResult(
            data_to=interface,
            data=base64_data,  # Rust expects base64 string for "file" type
            data_info=f"File download: {filename}",  # fix: interpolate the filename
            data_type="file"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def redirect(cls, url, status_code=302, info="Redirect", interface=ToolBoxInterfaces.remote):
        """Create a redirect response."""
        error = ToolBoxError.none
        info_obj = ToolBoxInfo(exec_code=status_code, help_text=info)

        result = ToolBoxResult(
            data_to=interface,
            data=url,
            data_info="Redirect response",
            data_type="redirect"
        )

        return cls(error=error, info=info_obj, result=result)

    @classmethod
    def ok(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.native):
        """Create a success Result carrying arbitrary data."""
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=0, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def html(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.remote, data_type="html",status=200, headers=None, row=False):
        """Create an HTML response Result.

        Unless row=True, the markup is wrapped in the main-content div and
        prefixed with the app web context when not already present.  A dict
        of headers switches the payload to the "special_html" shape.
        """
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=status, help_text=info)
        from ...utils.system.getting_and_closing_app import get_app

        if data is None:
            data = ""  # guard: the default None would break the 'in' checks below
        # fix: the wrapped-already check previously contained stray quotes and
        # could never match, and the wrapper div was never closed (<div> -> </div>).
        if not row and '<div class="main-content"' not in data:
            data = f'<div class="main-content frosted-glass">{data}</div>'
        if not row and get_app().web_context() not in data:
            data = get_app().web_context() + data

        if isinstance(headers, dict):
            result = ToolBoxResult(data_to=interface, data={'html':data,'headers':headers}, data_info=data_info,
                                   data_type="special_html")
        else:
            result = ToolBoxResult(data_to=interface, data=data, data_info=data_info,
                                   data_type=data_type if data_type is not None else type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def future(cls, data=None, data_info="", info="OK", interface=ToolBoxInterfaces.future):
        """Create a Result wrapping an awaitable payload (resolved via aget())."""
        error = ToolBoxError.none
        info = ToolBoxInfo(exec_code=0, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type="future")
        return cls(error=error, info=info, result=result)

    @classmethod
    def custom_error(cls, data=None, data_info="", info="", exec_code=-1, interface=ToolBoxInterfaces.native):
        """Create a custom-error Result with a caller-chosen exec code."""
        error = ToolBoxError.custom_error
        info = ToolBoxInfo(exec_code=exec_code, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def error(cls, data=None, data_info="", info="", exec_code=450, interface=ToolBoxInterfaces.remote):
        """Create a generic error Result (default exec code 450, remote interface)."""
        error = ToolBoxError.custom_error
        info = ToolBoxInfo(exec_code=exec_code, help_text=info)
        result = ToolBoxResult(data_to=interface, data=data, data_info=data_info, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def default_user_error(cls, info="", exec_code=-3, interface=ToolBoxInterfaces.native, data=None):
        """Create an input-error Result (caller/user fault)."""
        error = ToolBoxError.input_error
        info = ToolBoxInfo(exec_code, info)
        result = ToolBoxResult(data_to=interface, data=data, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    @classmethod
    def default_internal_error(cls, info="", exec_code=-2, interface=ToolBoxInterfaces.native, data=None):
        """Create an internal-error Result (system fault)."""
        error = ToolBoxError.internal_error
        info = ToolBoxInfo(exec_code, info)
        result = ToolBoxResult(data_to=interface, data=data, data_type=type(data).__name__)
        return cls(error=error, info=info, result=result)

    def print(self, show=True, show_data=True, prifix=""):
        """Render a human-readable summary; print it when show=True, else return it as str."""
        data = '\n' + f"{((prifix + f'Data_{self.result.data_type}: ' + str(self.result.data) if self.result.data is not None else 'NO Data') if not isinstance(self.result.data, Result) else self.result.data.print(show=False, show_data=show_data, prifix=prifix + '-')) if show_data else 'Data: private'}"
        origin = '\n' + f"{prifix + 'Origin: ' + str(self.origin) if self.origin is not None else 'NO Origin'}"
        text = (f"Function Exec code: {self.info.exec_code}"
                f"\n{prifix}Info's:"
                f" {self.info.help_text} {'<|> ' + str(self.result.data_info) if self.result.data_info is not None else ''}"
                f"{origin}{(data[:100]+'...') if not data.endswith('NO Data') else ''}\n")
        if not show:
            return text
        print("\n======== Result ========\n" + text + "------- EndOfD -------")
        return self

    def log(self, show_data=True, prifix=""):
        """Write the print() summary to the debug log on a single line."""
        from toolboxv2 import get_logger
        get_logger().debug(self.print(show=False, show_data=show_data, prifix=prifix).replace("\n", " - "))
        return self

    def __str__(self):
        return self.print(show=False, show_data=True)

    def get(self, key=None, default=None):
        """Return the payload (or payload[key] for dict payloads); nested Results delegate."""
        data = self.result.data
        if isinstance(data, Result):
            return data.get(key=key, default=default)
        if key is not None and isinstance(data, dict):
            return data.get(key, default)
        return data if data is not None else default

    async def aget(self, key=None, default=None):
        """Async variant of get(); awaits future/coroutine payloads before extracting."""
        if asyncio.isfuture(self.result.data) or asyncio.iscoroutine(self.result.data) or (
            isinstance(self.result.data_to, Enum) and self.result.data_to.name == ToolBoxInterfaces.future.name):
            data = await self.result.data
        else:
            data = self.get(key=None, default=None)
        if isinstance(data, Result):
            return data.get(key=key, default=default)
        if key is not None and isinstance(data, dict):
            return data.get(key, default)
        return data if data is not None else default

    def lazy_return(self, _=0, data=None, **kwargs):
        """On success return data (or self); on failure act per flag: raise / logg / user / intern."""
        flags = ['raise', 'logg', 'user', 'intern']
        flag = flags[_] if isinstance(_, int) else _
        if self.info.exec_code == 0:
            return self if data is None else data if _test_is_result(data) else self.ok(data=data, **kwargs)
        if flag == 'raise':
            raise ValueError(self.print(show=False))
        if flag == 'logg':
            from .. import get_logger
            get_logger().error(self.print(show=False))

        if flag == 'user':
            return self if data is None else data if _test_is_result(data) else self.default_user_error(data=data,
                                                                                                        **kwargs)
        if flag == 'intern':
            return self if data is None else data if _test_is_result(data) else self.default_internal_error(data=data,
                                                                                                            **kwargs)

        return self if data is None else data if _test_is_result(data) else self.custom_error(data=data, **kwargs)

    @property
    def bg_task(self):
        """The background task attached via task(), or None."""
        return self._task

binary(data, content_type='application/octet-stream', download_name=None, info='OK', interface=ToolBoxInterfaces.remote) classmethod

Create a binary data response Result.

Source code in toolboxv2/utils/system/types.py
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
@classmethod
def binary(cls, data, content_type="application/octet-stream", download_name=None, info="OK",
           interface=ToolBoxInterfaces.remote):
    """Create a binary data response Result."""
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=0, help_text=info)

    # Create a dictionary with binary data and metadata
    binary_data = {
        "data": data,
        "content_type": content_type,
        "filename": download_name
    }

    result = ToolBoxResult(
        data_to=interface,
        data=binary_data,
        data_info=f"Binary response: {download_name}" if download_name else "Binary response",
        data_type="binary"
    )

    return cls(error=error, info=info_obj, result=result)

file(data, filename, content_type=None, info='OK', interface=ToolBoxInterfaces.remote) classmethod

Create a file download response Result.

Parameters:

Name Type Description Default
data

File data as bytes or base64 string

required
filename

Name of the file for download

required
content_type

MIME type of the file (auto-detected if None)

None
info

Response info text

'OK'
interface

Target interface

remote

Returns:

Type Description

Result object configured for file download

Source code in toolboxv2/utils/system/types.py
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
@classmethod
def file(cls, data, filename, content_type=None, info="OK", interface=ToolBoxInterfaces.remote):
    """Create a file download response Result.

    Args:
        data: File data as bytes or base64 string
        filename: Name of the file for download
        content_type: MIME type of the file (auto-detected if None)
        info: Response info text
        interface: Target interface

    Returns:
        Result object configured for file download
    """
    import base64
    import mimetypes

    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=200, help_text=info)

    # Auto-detect content type if not provided
    if content_type is None:
        content_type, _ = mimetypes.guess_type(filename)
        if content_type is None:
            content_type = "application/octet-stream"

    # Ensure data is base64 encoded string (as expected by Rust server)
    if isinstance(data, bytes):
        base64_data = base64.b64encode(data).decode('utf-8')
    elif isinstance(data, str):
        # Assume it's already base64 encoded
        base64_data = data
    else:
        raise ValueError("File data must be bytes or base64 string")

    result = ToolBoxResult(
        data_to=interface,
        data=base64_data,  # Rust expects base64 string for "file" type
        data_info=f"File download: {filename}",
        data_type="file"
    )

    return cls(error=error, info=info_obj, result=result)

json(data, info='OK', interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None) classmethod

Create a JSON response Result.

Source code in toolboxv2/utils/system/types.py
844
845
846
847
848
849
850
851
852
853
854
855
856
857
@classmethod
def json(cls, data, info="OK", interface=ToolBoxInterfaces.remote, exec_code=0, status_code=None):
    """Create a JSON response Result."""
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=status_code or exec_code, help_text=info)

    result = ToolBoxResult(
        data_to=interface,
        data=data,
        data_info="JSON response",
        data_type="json"
    )

    return cls(error=error, info=info_obj, result=result)

redirect(url, status_code=302, info='Redirect', interface=ToolBoxInterfaces.remote) classmethod

Create a redirect response.

Source code in toolboxv2/utils/system/types.py
943
944
945
946
947
948
949
950
951
952
953
954
955
956
@classmethod
def redirect(cls, url, status_code=302, info="Redirect", interface=ToolBoxInterfaces.remote):
    """Create a redirect response."""
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=status_code, help_text=info)

    result = ToolBoxResult(
        data_to=interface,
        data=url,
        data_info="Redirect response",
        data_type="redirect"
    )

    return cls(error=error, info=info_obj, result=result)

sse(stream_generator, info='OK', interface=ToolBoxInterfaces.remote, cleanup_func=None) classmethod

Create a Server-Sent Events (SSE) streaming response Result.

Parameters:

Name Type Description Default
stream_generator Any

A source yielding individual data items. This can be an async generator, sync generator, iterable, or a single item. Each item will be formatted as an SSE event.

required
info str

Optional help text for the Result.

'OK'
interface ToolBoxInterfaces

Optional ToolBoxInterface to target.

remote
cleanup_func Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None

Optional cleanup function to run when the stream ends or is cancelled.

None
#http_headers

Optional dictionary of custom HTTP headers for the SSE response.

required

Returns:

Type Description

A Result object configured for SSE streaming.

Source code in toolboxv2/utils/system/types.py
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
@classmethod
def sse(cls,
        stream_generator: Any,
        info: str = "OK",
        interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
        cleanup_func: Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None = None,
        # http_headers: Optional[dict] = None # If we want to allow overriding default SSE HTTP headers
        ):
    """
    Create a Server-Sent Events (SSE) streaming response Result.

    Args:
        stream_generator: A source yielding individual data items. This can be an
                          async generator, sync generator, iterable, or a single item.
                          Each item will be formatted as an SSE event.
        info: Optional help text for the Result.
        interface: Optional ToolBoxInterface to target.
        cleanup_func: Optional cleanup function to run when the stream ends or is cancelled.
        #http_headers: Optional dictionary of custom HTTP headers for the SSE response.

    Returns:
        A Result object configured for SSE streaming.
    """
    # Result.stream will handle calling SSEGenerator.create_sse_stream
    # and setting appropriate default headers for SSE when content_type is "text/event-stream".
    return cls.stream(
        stream_generator=stream_generator,
        content_type="text/event-stream",
        # headers=http_headers, # Pass if we add http_headers param
        info=info,
        interface=interface,
        cleanup_func=cleanup_func
    )

stream(stream_generator, content_type='text/event-stream', headers=None, info='OK', interface=ToolBoxInterfaces.remote, cleanup_func=None) classmethod

Create a streaming response Result. Handles SSE and other stream types.

Parameters:

Name Type Description Default
stream_generator Any

Any stream source (async generator, sync generator, iterable, or single item).

required
content_type str

Content-Type header (default: text/event-stream for SSE).

'text/event-stream'
headers dict | None

Additional HTTP headers for the response.

None
info str

Help text for the result.

'OK'
interface ToolBoxInterfaces

Interface to send data to.

remote
cleanup_func Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None

Optional function for cleanup.

None

Returns:

Type Description

A Result object configured for streaming.

Source code in toolboxv2/utils/system/types.py
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
@classmethod
def stream(cls,
           stream_generator: Any,  # Renamed from source for clarity
           content_type: str = "text/event-stream",  # Default to SSE
           headers: dict | None = None,
           info: str = "OK",
           interface: ToolBoxInterfaces = ToolBoxInterfaces.remote,
           cleanup_func: Callable[[], None] | Callable[[], T] | Callable[[], AsyncGenerator[T, None]] | None = None):
    """
    Create a streaming response Result. Handles SSE and other stream types.

    Args:
        stream_generator: Any stream source (async generator, sync generator, iterable, or single item).
        content_type: Content-Type header (default: text/event-stream for SSE).
        headers: Additional HTTP headers for the response.
        info: Help text for the result.
        interface: Interface to send data to.
        cleanup_func: Optional function for cleanup.

    Returns:
        A Result object configured for streaming.
    """
    error = ToolBoxError.none
    info_obj = ToolBoxInfo(exec_code=0, help_text=info)

    final_generator: AsyncGenerator[str, None]

    if content_type == "text/event-stream":
        # For SSE, always use SSEGenerator.create_sse_stream to wrap the source.
        # SSEGenerator.create_sse_stream handles various types of stream_generator internally.
        final_generator = SSEGenerator.create_sse_stream(source=stream_generator, cleanup_func=cleanup_func)

        # Standard SSE headers for the HTTP response itself
        # These will be stored in the Result object. Rust side decides how to use them.
        standard_sse_headers = {
            "Cache-Control": "no-cache",  # SSE specific
            "Connection": "keep-alive",  # SSE specific
            "X-Accel-Buffering": "no",  # Useful for proxies with SSE
            # Content-Type is implicitly text/event-stream, will be in streaming_data below
        }
        all_response_headers = standard_sse_headers.copy()
        if headers:
            all_response_headers.update(headers)
    else:
        # For non-SSE streams.
        # If stream_generator is sync, wrap it to be async.
        # If already async or single item, it will be handled.
        # Rust's stream_generator in ToolboxClient seems to handle both sync/async Python generators.
        # For consistency with how SSEGenerator does it, we can wrap sync ones.
        if inspect.isgenerator(stream_generator) or \
            (not isinstance(stream_generator, str) and hasattr(stream_generator, '__iter__')):
            final_generator = SSEGenerator.wrap_sync_generator(stream_generator)  # Simple async wrapper
        elif inspect.isasyncgen(stream_generator):
            final_generator = stream_generator
        else:  # Single item or string
            async def _single_item_gen():
                yield stream_generator

            final_generator = _single_item_gen()
        all_response_headers = headers if headers else {}

    # Prepare streaming data to be stored in the Result object
    streaming_data = {
        "type": "stream",  # Indicator for Rust side
        "generator": final_generator,
        "content_type": content_type,  # Let Rust know the intended content type
        "headers": all_response_headers  # Intended HTTP headers for the overall response
    }

    result_payload = ToolBoxResult(
        data_to=interface,
        data=streaming_data,
        data_info="Streaming response" if content_type != "text/event-stream" else "SSE Event Stream",
        data_type="stream"  # Generic type for Rust to identify it needs to stream from 'generator'
    )

    return cls(error=error, info=info_obj, result=result_payload)

text(text_data, content_type='text/plain', exec_code=None, status=200, info='OK', interface=ToolBoxInterfaces.remote, headers=None) classmethod

Create a text response Result with specific content type.

Source code in toolboxv2/utils/system/types.py
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
@classmethod
def text(cls, text_data, content_type="text/plain", exec_code=None, status=200, info="OK", interface=ToolBoxInterfaces.remote, headers=None):
    """Create a text response Result with specific content type."""
    # Explicit headers are only carried by the html factory, so delegate
    # to it whenever the caller supplied any.
    if headers is not None:
        return cls.html(text_data, status=exec_code or status, info=info, headers=headers)

    payload = ToolBoxResult(
        data_to=interface,
        data=text_data,
        data_info="Text response",
        data_type=content_type
    )
    meta = ToolBoxInfo(exec_code=exec_code or status, help_text=info)
    return cls(error=ToolBoxError.none, info=meta, result=payload)

toolboxv2.ApiResult

Bases: BaseModel

Source code in toolboxv2/utils/system/types.py
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
class ApiResult(BaseModel):
    """Pydantic wire-format model for a toolbox call result.

    Mirrors the plain `Result` object for transport across the API
    boundary; `as_result` converts back to the internal representation.
    """
    error: None | str= None  # error marker; None means "no error"
    origin: Any | None  # where the result originated (NOTE(review): no default — confirm this field is meant to be required)
    result: ToolBoxResultBM | None = None  # optional payload wrapper
    info: ToolBoxInfoBM | None  # exec code + help text (NOTE(review): no default — confirm required)

    def as_result(self):
        """Convert this API model back into a plain `Result` object."""
        return Result(
            # `error` may arrive as an Enum member; unwrap to its raw value.
            error=self.error.value if isinstance(self.error, Enum) else self.error,
            result=ToolBoxResult(
                data_to=self.result.data_to.value if isinstance(self.result.data_to, Enum) else self.result.data_to,
                data_info=self.result.data_info,
                data=self.result.data,
                data_type=self.result.data_type
            ) if self.result else None,
            info=ToolBoxInfo(
                exec_code=self.info.exec_code,
                help_text=self.info.help_text
            ) if self.info else None,
            origin=self.origin
        )

    def to_api_result(self):
        """Already in API form; return self unchanged."""
        return self

    def print(self, *args, **kwargs):
        """Delegate printing to the `Result` form, converting the return
        value back to an ApiResult unless a plain string was produced."""
        res = self.as_result().print(*args, **kwargs)
        if not isinstance(res, str):
            res = res.to_api_result()
        return res

toolboxv2.RequestData dataclass

Main class representing the complete request data structure.

Source code in toolboxv2/utils/system/types.py
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
@dataclass
class RequestData:
    """Main class representing the complete request data structure."""
    request: Request  # parsed HTTP request (method, path, headers, body, ...)
    session: Session  # server-side session payload attached to the request
    session_id: str  # opaque session identifier

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> 'RequestData':
        """Create a RequestData instance from a dictionary."""
        # Missing sub-dicts fall back to {} so partial payloads still
        # construct (empty) Request/Session objects instead of failing.
        return cls(
            request=Request.from_dict(data.get('request', {})),
            session=Session.from_dict(data.get('session', {})),
            session_id=data.get('session_id', '')
        )

    def to_dict(self) -> dict[str, Any]:
        """Convert the RequestData object back to a dictionary."""
        return {
            'request': self.request.to_dict(),
            'session': self.session.to_dict(),
            'session_id': self.session_id
        }

    def __getattr__(self, name: str) -> Any:
        """Delegate unknown attributes to the `request` object."""
        # Only reached when the attribute does not exist directly on
        # RequestData (and is not `session` or `session_id`).
        if hasattr(self.request, name):
            return getattr(self.request, name)
        raise AttributeError(f"'RequestData' object has no attribute '{name}'")

    @classmethod
    def moc(cls):
        # Build a fully populated mock RequestData (sample HTMX POST) —
        # presumably used for tests/demos; verify against callers.
        return cls(
            request=Request.from_dict({
                'content_type': 'application/x-www-form-urlencoded',
                'headers': {
                    'accept': '*/*',
                    'accept-encoding': 'gzip, deflate, br, zstd',
                    'accept-language': 'de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7',
                    'connection': 'keep-alive',
                    'content-length': '107',
                    'content-type': 'application/x-www-form-urlencoded',
                    'cookie': 'session=abc123',
                    'host': 'localhost:8080',
                    'hx-current-url': 'http://localhost:8080/api/TruthSeeker/get_main_ui',
                    'hx-request': 'true',
                    'hx-target': 'estimates-guest_1fc2c9',
                    'hx-trigger': 'config-form-guest_1fc2c9',
                    'origin': 'http://localhost:8080',
                    'referer': 'http://localhost:8080/api/TruthSeeker/get_main_ui',
                    'sec-ch-ua': '"Chromium";v="134", "Not:A-Brand";v="24", "Google Chrome";v="134"',
                    'sec-ch-ua-mobile': '?0',
                    'sec-ch-ua-platform': '"Windows"',
                    'sec-fetch-dest': 'empty',
                    'sec-fetch-mode': 'cors',
                    'sec-fetch-site': 'same-origin',
                    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
                },
                'method': 'POST',
                'path': '/api/TruthSeeker/update_estimates',
                'query_params': {},
                'form_data': {
                    'param1': 'value1',
                    'param2': 'value2'
                }
            }),
            session=Session.from_dict({
                'SiID': '29a2e258e18252e2afd5ff943523f09c82f1bb9adfe382a6f33fc6a8381de898',
                'level': '1',
                'spec': '74eed1c8de06886842e235486c3c2fd6bcd60586998ac5beb87f13c0d1750e1d',
                'user_name': 'root',
                'custom_field': 'custom_value'
            }),
            session_id='0x29dd1ac0d1e30d3f'
        )

__getattr__(name)

Delegate unknown attributes to the request object.

Source code in toolboxv2/utils/system/types.py
325
326
327
328
329
330
331
def __getattr__(self, name: str) -> Any:
    """Delegate unknown attributes to the `request` object."""
    # Only reached when the attribute does not exist directly on
    # RequestData (and is not `session` or `session_id`).
    if hasattr(self.request, name):
        return getattr(self.request, name)
    raise AttributeError(f"'RequestData' object has no attribute '{name}'")

from_dict(data) classmethod

Create a RequestData instance from a dictionary.

Source code in toolboxv2/utils/system/types.py
308
309
310
311
312
313
314
315
@classmethod
def from_dict(cls, data: dict[str, Any]) -> 'RequestData':
    """Create a RequestData instance from a dictionary."""
    # Missing sub-dicts fall back to {} so partial payloads still
    # construct (empty) Request/Session objects instead of failing.
    return cls(
        request=Request.from_dict(data.get('request', {})),
        session=Session.from_dict(data.get('session', {})),
        session_id=data.get('session_id', '')
    )

to_dict()

Convert the RequestData object back to a dictionary.

Source code in toolboxv2/utils/system/types.py
317
318
319
320
321
322
323
def to_dict(self) -> dict[str, Any]:
    """Convert the RequestData object back to a dictionary."""
    # Nested objects serialize themselves via their own to_dict().
    return {
        'request': self.request.to_dict(),
        'session': self.session.to_dict(),
        'session_id': self.session_id
    }

Security

toolboxv2.Code

Source code in toolboxv2/utils/security/cryp.py
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
class Code:
    """Cryptographic helper collection.

    Bundles symmetric (Fernet) and asymmetric (RSA OAEP/PSS with SHA-512,
    ECDSA verification) operations, one-way hashing, and key persistence.
    Key files written to disk keep the private key encrypted under the
    device key provided by ``DEVICE_KEY``.
    """

    @staticmethod
    def DK():
        # NOTE(review): returns the DEVICE_KEY callable itself, not the key
        # value — callers invoke the result (Code.DK()()) to get the key.
        return DEVICE_KEY

    def decode_code(self, encrypted_data, key=None):
        """Decrypt `encrypted_data` symmetrically; defaults to the device key."""
        if not isinstance(encrypted_data, str):
            encrypted_data = str(encrypted_data)

        if key is None:
            key = DEVICE_KEY()

        return self.decrypt_symmetric(encrypted_data, key)

    def encode_code(self, data, key=None):
        """Encrypt `data` symmetrically; defaults to the device key."""
        if not isinstance(data, str):
            data = str(data)

        if key is None:
            key = DEVICE_KEY()

        return self.encrypt_symmetric(data, key)

    @staticmethod
    def generate_seed() -> int:
        """Generate a random number usable as a seed.

        Returns:
            int: A random integer in [2**32 - 1, 2**64 - 1].
        """
        return random.randint(2 ** 32 - 1, 2 ** 64 - 1)

    @staticmethod
    def one_way_hash(text: str, salt: str = '', pepper: str = '') -> str:
        """Hash a text together with salt and pepper (SHA-256).

        Args:
            text (str): The text to hash.
            salt (str): Salt value, prepended to the text.
            pepper (str): Pepper value, appended to the text.

        Returns:
            str: The resulting hex digest.
        """
        return hashlib.sha256((salt + text + pepper).encode()).hexdigest()

    @staticmethod
    def generate_symmetric_key(as_str=True) -> str | bytes:
        """Generate a key for symmetric (Fernet) encryption.

        Args:
            as_str (bool): If True (default) return the key as str, else bytes.

        Returns:
            str | bytes: The generated key.
        """
        key = Fernet.generate_key()
        if as_str:
            key = key.decode()
        return key

    @staticmethod
    def encrypt_symmetric(text: str | bytes, key: str) -> str:
        """Encrypt a text with a given symmetric key.

        Args:
            text (str | bytes): The text to encrypt.
            key (str): The symmetric (Fernet) key.

        Returns:
            str: The encrypted text, or "Error encrypt" on failure.
        """
        if isinstance(text, str):
            text = text.encode()

        try:
            fernet = Fernet(key.encode())
            return fernet.encrypt(text).decode()
        except Exception as e:
            get_logger().error(f"Error encrypt_symmetric #{str(e)}#")
            return "Error encrypt"

    @staticmethod
    def decrypt_symmetric(encrypted_text: str, key: str, to_str=True, mute=False) -> str | bytes:
        """Decrypt a text with a given symmetric key.

        Args:
            encrypted_text (str): The text to decrypt.
            key (str): The symmetric (Fernet) key.
            to_str (bool): If True (default) return str, else bytes.
            mute (bool): Currently unused; decryption errors propagate to
                the caller (e.g. ``cryptography.fernet.InvalidToken``).

        Returns:
            str | bytes: The decrypted text.
        """
        if isinstance(key, str):
            key = key.encode()

        fernet = Fernet(key)
        text_b = fernet.decrypt(encrypted_text)
        if not to_str:
            return text_b
        return text_b.decode()

    @staticmethod
    def generate_asymmetric_keys() -> tuple[str, str]:
        """Generate a public/private key pair for asymmetric encryption.

        Returns:
            tuple[str, str]: (public key PEM, private key PEM).
        """
        private_key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048 * 3,
        )
        public_key = private_key.public_key()

        # Serialize both keys to PEM strings.
        pem_private_key = private_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption()
        ).decode()

        pem_public_key = public_key.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        ).decode()

        return pem_public_key, pem_private_key

    @staticmethod
    def save_keys_to_files(public_key: str, private_key: str, directory: str = "keys") -> None:
        """Save the generated keys to separate files.

        The private key is encrypted with the device key before writing.

        Args:
            public_key (str): The public key in PEM format.
            private_key (str): The private key in PEM format.
            directory (str): Directory in which to store the key files.
        """
        # Create the directory if it does not exist.
        os.makedirs(directory, exist_ok=True)

        # Fetch the device key.
        device_key = DEVICE_KEY()

        # Encrypt the private key under the device key.
        encrypted_private_key = Code.encrypt_symmetric(private_key, device_key)

        # Write the public key as-is.
        public_key_path = os.path.join(directory, "public_key.pem")
        with open(public_key_path, "w") as f:
            f.write(public_key)

        # Write the encrypted private key.
        private_key_path = os.path.join(directory, "private_key.pem")
        with open(private_key_path, "w") as f:
            f.write(encrypted_private_key)

        print("Saved keys in ", public_key_path)

    @staticmethod
    def load_keys_from_files(directory: str = "keys") -> tuple[str, str]:
        """Load the key pair from files.

        The private key is decrypted with the device key after reading.

        Args:
            directory (str): Directory from which to load the key files.

        Returns:
            tuple[str, str]: (public key, private key), or ("", "") when
                either key file is missing.
        """
        # Paths to the key files.
        public_key_path = os.path.join(directory, "public_key.pem")
        private_key_path = os.path.join(directory, "private_key.pem")

        # Bail out with empty strings when either file is missing.
        if not os.path.exists(public_key_path) or not os.path.exists(private_key_path):
            return "", ""

        # Fetch the device key.
        device_key = DEVICE_KEY()

        # Load the public key.
        with open(public_key_path) as f:
            public_key = f.read()

        # Load and decrypt the private key.
        with open(private_key_path) as f:
            encrypted_private_key = f.read()
            private_key = Code.decrypt_symmetric(encrypted_private_key, device_key)

        return public_key, private_key

    @staticmethod
    def encrypt_asymmetric(text: str, public_key_str: str) -> str:
        """Encrypt a text with a given public key (RSA-OAEP, SHA-512).

        Args:
            text (str): The text to encrypt.
            public_key_str (str): The public key as a PEM string.

        Returns:
            str: The ciphertext as a hex string, or "Invalid" on failure.
        """
        try:
            public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
            encrypted = public_key.encrypt(
                text.encode(),
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA512()),
                    algorithm=hashes.SHA512(),
                    label=None
                )
            )
            return encrypted.hex()
        except Exception as e:
            get_logger().error(f"Error encrypt_asymmetric {e}")
            return "Invalid"

    @staticmethod
    def decrypt_asymmetric(encrypted_text_hex: str, private_key_str: str) -> str:
        """Decrypt a text with a given private key (RSA-OAEP, SHA-512).

        Args:
            encrypted_text_hex (str): The ciphertext as a hex string.
            private_key_str (str): The private key as a PEM string.

        Returns:
            str: The decrypted text, or "Invalid" on failure.
        """
        try:
            private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
            decrypted = private_key.decrypt(
                bytes.fromhex(encrypted_text_hex),
                padding.OAEP(
                    mgf=padding.MGF1(algorithm=hashes.SHA512()),
                    algorithm=hashes.SHA512(),
                    label=None
                )
            )
            return decrypted.decode()

        except Exception as e:
            get_logger().error(f"Error decrypt_asymmetric {e}")
        return "Invalid"

    @staticmethod
    def verify_signature(signature: str | bytes, message: str | bytes, public_key_str: str,
                         salt_length=padding.PSS.MAX_LENGTH) -> bool:
        """Verify an RSA-PSS (SHA-512) signature.

        Returns:
            bool: True when the signature is valid, False otherwise.
        """
        if isinstance(signature, str):
            signature = signature.encode()
        if isinstance(message, str):
            message = message.encode()
        try:
            public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
            public_key.verify(
                signature=signature,
                data=message,
                padding=padding.PSS(
                    mgf=padding.MGF1(hashes.SHA512()),
                    salt_length=salt_length
                ),
                algorithm=hashes.SHA512()
            )
            return True
        except Exception:
            # Any load/verification failure means "not valid"; was a bare
            # except, narrowed so KeyboardInterrupt/SystemExit pass through.
            pass
        return False

    @staticmethod
    def verify_signature_web_algo(signature: str | bytes, message: str | bytes, public_key_str: str,
                                  algo: int = -512) -> bool:
        """Verify an ECDSA signature (SHA-512 when algo == -512, else SHA-256).

        Returns:
            bool: True when the signature is valid, False otherwise.
        """
        signature_algorithm = ECDSA(hashes.SHA512())
        if algo != -512:
            signature_algorithm = ECDSA(hashes.SHA256())

        if isinstance(signature, str):
            signature = signature.encode()
        if isinstance(message, str):
            message = message.encode()
        try:
            public_key = serialization.load_pem_public_key(public_key_str.encode())
            # ECDSA verification takes no padding argument, unlike the
            # RSA-PSS path in verify_signature above.
            public_key.verify(
                signature=signature,
                data=message,
                signature_algorithm=signature_algorithm
            )
            return True
        except Exception:
            # Narrowed from a bare except; invalid means False, not a crash.
            pass
        return False

    @staticmethod
    def create_signature(message: str, private_key_str: str, salt_length=padding.PSS.MAX_LENGTH,
                         row=False) -> str | bytes:
        """Sign a message with RSA-PSS (SHA-512).

        Args:
            message (str): The message to sign.
            private_key_str (str): The private key as a PEM string.
            salt_length: PSS salt length (defaults to the maximum).
            row (bool): If True return raw signature bytes, else base64 str.

        Returns:
            str | bytes: The signature, or "Invalid Key" on failure.
        """
        try:
            private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
            signature = private_key.sign(
                message.encode(),
                padding.PSS(
                    mgf=padding.MGF1(hashes.SHA512()),
                    salt_length=salt_length
                ),
                hashes.SHA512()
            )
            if row:
                return signature
            return base64.b64encode(signature).decode()
        except Exception as e:
            get_logger().error(f"Error create_signature {e}")
            print(e)
        return "Invalid Key"

    @staticmethod
    def pem_to_public_key(pem_key: str):
        """Convert a PEM-encoded public key into a public key object.

        Args:
            pem_key (str): The PEM-encoded public key.

        Returns:
            The loaded public key object.
        """
        public_key = serialization.load_pem_public_key(pem_key.encode())
        return public_key

    @staticmethod
    def public_key_to_pem(public_key: RSAPublicKey):
        """Convert a public key object into a PEM-encoded string.

        Args:
            public_key (RSAPublicKey): The public key object.

        Returns:
            str: The PEM-encoded public key.
        """
        pem = public_key.public_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PublicFormat.SubjectPublicKeyInfo
        )
        return pem.decode()

decrypt_asymmetric(encrypted_text_hex, private_key_str) staticmethod

Entschlüsselt einen Text mit einem gegebenen privaten Schlüssel.

Parameters:

Name Type Description Default
encrypted_text_hex str

Der verschlüsselte Text als Hex-String.

required
private_key_str str

Der private Schlüssel als String.

required

Returns:

Name Type Description
str str

Der entschlüsselte Text.

Source code in toolboxv2/utils/security/cryp.py
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
@staticmethod
def decrypt_asymmetric(encrypted_text_hex: str, private_key_str: str) -> str:
    """
    Decrypt a text with a given private key (RSA-OAEP, SHA-512).

    Args:
        encrypted_text_hex (str): The ciphertext as a hex string.
        private_key_str (str): The private key as a PEM string.

    Returns:
        str: The decrypted text, or "Invalid" on any failure.
    """
    try:
        private_key = serialization.load_pem_private_key(private_key_str.encode(), password=None)
        decrypted = private_key.decrypt(
            bytes.fromhex(encrypted_text_hex),
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.SHA512()),
                algorithm=hashes.SHA512(),
                label=None
            )
        )
        return decrypted.decode()

    except Exception as e:
        get_logger().error(f"Error decrypt_asymmetric {e}")
    return "Invalid"

decrypt_symmetric(encrypted_text, key, to_str=True, mute=False) staticmethod

Entschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

Parameters:

Name Type Description Default
encrypted_text str

Der zu entschlüsselnde Text.

required
key str

Der symmetrische Schlüssel.

required
to_str bool

default true returns str if false returns bytes

True

Returns: str: Der entschlüsselte Text.

Source code in toolboxv2/utils/security/cryp.py
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
@staticmethod
def decrypt_symmetric(encrypted_text: str, key: str, to_str=True, mute=False) -> str or bytes:
    """
    Decrypt a text with a given symmetric (Fernet) key.

    Args:
        encrypted_text (str): The text to decrypt.
        key (str): The symmetric key.
        to_str (bool): default true returns str if false returns bytes
        mute (bool): currently unused; decryption errors propagate
    Returns:
        str: The decrypted text.
    """

    if isinstance(key, str):
        key = key.encode()

    #try:
    fernet = Fernet(key)
    text_b = fernet.decrypt(encrypted_text)
    if not to_str:
        return text_b
    return text_b.decode()

encrypt_asymmetric(text, public_key_str) staticmethod

Verschlüsselt einen Text mit einem gegebenen öffentlichen Schlüssel.

Parameters:

Name Type Description Default
text str

Der zu verschlüsselnde Text.

required
public_key_str str

Der öffentliche Schlüssel als String oder im pem format.

required

Returns:

Name Type Description
str str

Der verschlüsselte Text.

Source code in toolboxv2/utils/security/cryp.py
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
@staticmethod
def encrypt_asymmetric(text: str, public_key_str: str) -> str:
    """
    Encrypt a text with a given public key (RSA-OAEP, SHA-512).

    Args:
        text (str): The text to encrypt.
        public_key_str (str): The public key as a string / in PEM format.

    Returns:
        str: The ciphertext as a hex string, or "Invalid" on failure.
    """
    # try:
    #    public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
    #  except Exception as e:
    #     get_logger().error(f"Error encrypt_asymmetric {e}")
    try:
        public_key: RSAPublicKey = serialization.load_pem_public_key(public_key_str.encode())
        encrypted = public_key.encrypt(
            text.encode(),
            padding.OAEP(
                mgf=padding.MGF1(algorithm=hashes.SHA512()),
                algorithm=hashes.SHA512(),
                label=None
            )
        )
        return encrypted.hex()
    except Exception as e:
        get_logger().error(f"Error encrypt_asymmetric {e}")
        return "Invalid"

encrypt_symmetric(text, key) staticmethod

Verschlüsselt einen Text mit einem gegebenen symmetrischen Schlüssel.

Parameters:

Name Type Description Default
text str

Der zu verschlüsselnde Text.

required
key str

Der symmetrische Schlüssel.

required

Returns:

Name Type Description
str str

Der verschlüsselte Text.

Source code in toolboxv2/utils/security/cryp.py
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
@staticmethod
def encrypt_symmetric(text: str or bytes, key: str) -> str:
    """
    Encrypt a text with a given symmetric (Fernet) key.

    Args:
        text (str): The text to encrypt.
        key (str): The symmetric key.

    Returns:
        str: The encrypted text, or "Error encrypt" on failure.
    """
    if isinstance(text, str):
        text = text.encode()

    try:
        fernet = Fernet(key.encode())
        return fernet.encrypt(text).decode()
    except Exception as e:
        get_logger().error(f"Error encrypt_symmetric #{str(e)}#")
        return "Error encrypt"

generate_asymmetric_keys() staticmethod

Generiert ein Paar von öffentlichen und privaten Schlüsseln für die asymmetrische Verschlüsselung.

Parameters:

Name Type Description Default
seed int

Ein optionaler Seed-Wert. Standardmäßig None.

required

Returns:

Type Description
(str, str)

Ein Tupel aus öffentlichem und privatem Schlüssel.

Source code in toolboxv2/utils/security/cryp.py
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
@staticmethod
def generate_asymmetric_keys() -> (str, str):
    """
    Generate a public/private key pair for asymmetric encryption.

    Returns:
        (str, str): A tuple of (public key PEM, private key PEM).
    """
    private_key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048 * 3,
    )
    public_key = private_key.public_key()

    # Serialize both keys to PEM strings.
    pem_private_key = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption()
    ).decode()

    pem_public_key = public_key.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo
    ).decode()

    return pem_public_key, pem_private_key

generate_seed() staticmethod

Erzeugt eine zufällige Zahl als Seed.

Returns:

Name Type Description
int int

Eine zufällige Zahl.

Source code in toolboxv2/utils/security/cryp.py
101
102
103
104
105
106
107
108
109
@staticmethod
def generate_seed() -> int:
    """
    Generate a random number usable as a seed.

    Returns:
        int: A random integer in [2**32 - 1, 2**64 - 1].
    """
    return random.randint(2 ** 32 - 1, 2 ** 64 - 1)

generate_symmetric_key(as_str=True) staticmethod

Generiert einen Schlüssel für die symmetrische Verschlüsselung.

Returns:

Name Type Description
str str or bytes

Der generierte Schlüssel.

Source code in toolboxv2/utils/security/cryp.py
127
128
129
130
131
132
133
134
135
136
137
138
@staticmethod
def generate_symmetric_key(as_str=True) -> str or bytes:
    """
    Generate a key for symmetric (Fernet) encryption.

    Args:
        as_str (bool): If True (default) return the key as str, else bytes.

    Returns:
        str: The generated key.
    """
    key = Fernet.generate_key()
    if as_str:
        key = key.decode()
    return key

load_keys_from_files(directory='keys') staticmethod

Lädt die Schlüssel aus den Dateien. Der private Schlüssel wird mit dem Device Key entschlüsselt.

Parameters:

Name Type Description Default
directory str

Das Verzeichnis, aus dem die Schlüssel geladen werden sollen

'keys'

Returns:

Type Description
(str, str)

Ein Tupel aus öffentlichem und privatem Schlüssel

Raises:

Type Description
FileNotFoundError

Wenn die Schlüsseldateien nicht gefunden werden können

Source code in toolboxv2/utils/security/cryp.py
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
@staticmethod
def load_keys_from_files(directory: str = "keys") -> (str, str):
    """
    Load the key pair from files.
    The private key is decrypted with the device key after reading.

    Args:
        directory (str): Directory from which to load the key files.

    Returns:
        (str, str): A tuple of (public key, private key), or ("", "")
            when either key file is missing.
    """
    # Paths to the key files.
    public_key_path = os.path.join(directory, "public_key.pem")
    private_key_path = os.path.join(directory, "private_key.pem")

    # Bail out with empty strings when either file is missing.
    if not os.path.exists(public_key_path) or not os.path.exists(private_key_path):
        return "", ""

    # Fetch the device key.
    device_key = DEVICE_KEY()

    # Load the public key.
    with open(public_key_path) as f:
        public_key = f.read()

    # Load and decrypt the private key.
    with open(private_key_path) as f:
        encrypted_private_key = f.read()
        private_key = Code.decrypt_symmetric(encrypted_private_key, device_key)

    return public_key, private_key

one_way_hash(text, salt='', pepper='') staticmethod

Erzeugt einen Hash eines gegebenen Textes mit Salt, Pepper und optional einem Seed.

Parameters:

Name Type Description Default
text str

Der zu hashende Text.

required
salt str

Der Salt-Wert.

''
pepper str

Der Pepper-Wert.

''
seed int

Ein optionaler Seed-Wert. Standardmäßig None.

required

Returns:

Name Type Description
str str

Der resultierende Hash-Wert.

Source code in toolboxv2/utils/security/cryp.py
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
@staticmethod
def one_way_hash(text: str, salt: str = '', pepper: str = '') -> str:
    """
    Hash a text together with salt and pepper (SHA-256).

    Args:
        text (str): The text to hash.
        salt (str): Salt value, prepended to the text.
        pepper (str): Pepper value, appended to the text.

    Returns:
        str: The resulting hex digest.
    """
    return hashlib.sha256((salt + text + pepper).encode()).hexdigest()

pem_to_public_key(pem_key) staticmethod

Konvertiert einen PEM-kodierten öffentlichen Schlüssel in ein PublicKey-Objekt.

Parameters:

Name Type Description Default
pem_key str

Der PEM-kodierte öffentliche Schlüssel.

required

Returns:

Name Type Description
PublicKey

Das PublicKey-Objekt.

Source code in toolboxv2/utils/security/cryp.py
422
423
424
425
426
427
428
429
430
431
432
433
434
@staticmethod
def pem_to_public_key(pem_key: str):
    """
    Deserialize a PEM-encoded public key string into a PublicKey object.

    Args:
        pem_key (str): The PEM-encoded public key.

    Returns:
        PublicKey: The deserialized public key object.
    """
    return serialization.load_pem_public_key(pem_key.encode())

public_key_to_pem(public_key) staticmethod

Konvertiert ein PublicKey-Objekt in einen PEM-kodierten String.

Parameters:

Name Type Description Default
public_key PublicKey

Das PublicKey-Objekt.

required

Returns:

Name Type Description
str

Der PEM-kodierte öffentliche Schlüssel.

Source code in toolboxv2/utils/security/cryp.py
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
@staticmethod
def public_key_to_pem(public_key: RSAPublicKey):
    """
    Serialize a PublicKey object into a PEM-encoded string.

    Args:
        public_key (PublicKey): The public key object.

    Returns:
        str: The PEM-encoded public key (SubjectPublicKeyInfo layout).
    """
    pem_bytes = public_key.public_bytes(
        format=serialization.PublicFormat.SubjectPublicKeyInfo,
        encoding=serialization.Encoding.PEM,
    )
    return pem_bytes.decode()

save_keys_to_files(public_key, private_key, directory='keys') staticmethod

Speichert die generierten Schlüssel in separate Dateien. Der private Schlüssel wird mit dem Device Key verschlüsselt.

Parameters:

Name Type Description Default
public_key str

Der öffentliche Schlüssel im PEM-Format

required
private_key str

Der private Schlüssel im PEM-Format

required
directory str

Das Verzeichnis, in dem die Schlüssel gespeichert werden sollen

'keys'
Source code in toolboxv2/utils/security/cryp.py
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
@staticmethod
def save_keys_to_files(public_key: str, private_key: str, directory: str = "keys") -> None:
    """
    Saves the generated keys into separate files.
    The private key is encrypted with the device key before writing.

    Args:
        public_key (str): The public key in PEM format.
        private_key (str): The private key in PEM format.
        directory (str): Directory the key files are written to.
    """
    # Create the target directory if it does not exist yet.
    os.makedirs(directory, exist_ok=True)

    # Obtain the device-specific key that protects the private key at rest.
    device_key = DEVICE_KEY()

    # Encrypt the private key with the device key.
    encrypted_private_key = Code.encrypt_symmetric(private_key, device_key)

    # Write the public key as plain PEM text.
    public_key_path = os.path.join(directory, "public_key.pem")
    with open(public_key_path, "w") as f:
        f.write(public_key)

    # Write the encrypted private key alongside it.
    private_key_path = os.path.join(directory, "private_key.pem")
    with open(private_key_path, "w") as f:
        f.write(encrypted_private_key)

    print("Saved keys in ", public_key_path)

Modules & Flows

toolboxv2.mods

Canvas

Tools

Bases: MainTool

Source code in toolboxv2/mods/Canvas.py
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
class Tools(MainTool):  # Removed EventManager for simplicity, as it was causing the issue. Direct SSE is better here.
    """Canvas module tool: tracks live SSE listener queues and per-user
    preview state, and broadcasts canvas events to connected clients."""

    def __init__(self, app: App):
        self.name = MOD_NAME
        self.version = VERSION
        self.color = "GREEN"
        self.tools_dict = {"name": MOD_NAME, "Version": self.show_version}

        # Canvas specific state
        # canvas_id -> list of asyncio.Queues, one per connected SSE client.
        self.live_canvas_sessions: dict[str, list[asyncio.Queue]] = defaultdict(list)
        # canvas_id -> {user_id: preview payload} for ephemeral previews.
        self.active_user_previews: dict[str, dict[str, Any]] = defaultdict(dict)
        # Guards active_user_previews across concurrent request handlers.
        self.previews_lock = asyncio.Lock()

        MainTool.__init__(self, load=on_start, v=self.version, tool=self.tools_dict, name=self.name,
                          color=self.color, app=app)
        self.app.logger.info(f"Canvas Tools (v{self.version}) initialized for app {self.app.id}.")

    @property
    def db_mod(self):
        # Fetch the DB module on each access and force it into CLUSTER_BLOB mode.
        db = self.app.get_mod("DB", spec=Name)
        if db.mode.value != "CLUSTER_BLOB":
            db.edit_cli("CB")
        return db

    def _broadcast_to_canvas_listeners(self, canvas_id: str, event_type: str, data: dict[str, Any],
                                       originator_user_id: str | None = None):
        """
        Creates a broadcast coroutine and submits it to the app's dedicated
        async manager to be run in the background.
        This is now a non-blocking fire-and-forget operation.
        """

        async def broadcast_coro():
            if canvas_id not in self.live_canvas_sessions:
                return

            # SSE-style message: event name plus a JSON-encoded data payload.
            message_obj = {
                "event": event_type,
                "data": json.dumps({
                    "canvas_id": canvas_id,
                    "originator_user_id": originator_user_id,
                    **data
                })
            }

            # Snapshot the listener list so concurrent (un)subscribes are safe.
            listeners = list(self.live_canvas_sessions.get(canvas_id, []))

            for q in listeners:
                try:
                    # Non-blocking put. If the queue is full, the client is lagging,
                    # and it's better to drop a message than to block the server.
                    q.put_nowait(message_obj)
                except asyncio.QueueFull:
                    self.app.logger.warning(
                        f"SSE queue full for canvas {canvas_id}. Message '{event_type}' dropped for one client.")
                except Exception as e:
                    self.app.logger.error(f"Error putting message on SSE queue: {e}")

        # Use the app's robust background runner to execute immediately and not block the caller.
        self.app.run_bg_task(broadcast_coro)

    def show_version(self):
        # Log and return this module's version string.
        self.app.logger.info(f"{self.name} Version: {self.version}")
        return self.version

    async def _get_user_specific_db_key(self, request: RequestData, base_key: str) -> str | None:
        # This logic is correct and can remain as is.

        # Namespace DB keys by the authenticated user's uid.
        user = await get_user_from_request(self.app, request)
        if user and user.uid:
            return f"{base_key}_{user.uid}"
        # NOTE(review): looks like a debug leftover — prints "ok" for every
        # unauthenticated request; confirm before removing.
        self.print("ok")
        # Fallback for public/guest access if you want to support it
        return f"{base_key}_public"

handle_send_canvas_action(app, request, data) async

Handles incremental, real-time actions from clients (e.g., adding an element). It persists the change to the database and then broadcasts it to all live listeners.

Source code in toolboxv2/mods/Canvas.py
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
@export(mod_name=MOD_NAME, api=True, version=VERSION, name="send_canvas_action", api_methods=['POST'],
        request_as_kwarg=True)
async def handle_send_canvas_action(app: App, request: RequestData, data: dict[str, Any]):
    """
    Handles incremental, real-time actions from clients (e.g., adding an element).
    It persists the change to the database and then broadcasts it to all live listeners.

    Args:
        app: The toolbox application instance.
        request: Incoming request; used to derive the user-specific DB key.
        data: Expected keys: "canvas_id", "action_type", "payload", "user_id".
            Preview actions ("preview_update"/"preview_clear") are broadcast
            only; "element_add"/"element_update"/"element_remove" are persisted.

    Returns:
        Result: ok on success, otherwise a user or internal error result.
    """
    canvas_tool = app.get_mod(MOD_NAME)
    if not canvas_tool or not canvas_tool.db_mod:
        return Result.default_internal_error("Canvas module or DB not loaded.")

    if not data:
        return Result.default_user_error("Request data is missing.", 400)

    canvas_id = data.get("canvas_id")
    action_type = data.get("action_type")
    action_payload = data.get("payload")
    user_id = data.get("user_id")

    # payload may legitimately be falsy (e.g. {}), so it is tested against None.
    if not all([canvas_id, action_type, user_id]) or action_payload is None:
        return Result.default_user_error("Request missing required fields.", 400)

    # --- Flow 1: Ephemeral 'preview' actions that DO NOT get persisted ---
    if action_type in ["preview_update", "preview_clear"]:
        sse_event_type = "user_preview_update" if action_type == "preview_update" else "clear_user_preview"
        sse_data = {"user_id": user_id}

        async with canvas_tool.previews_lock:
            if action_type == "preview_update":
                canvas_tool.active_user_previews[canvas_id][user_id] = action_payload
                sse_data["preview_data"] = action_payload
            elif user_id in canvas_tool.active_user_previews.get(canvas_id, {}):
                del canvas_tool.active_user_previews[canvas_id][user_id]

        # MODIFICATION: Call the non-blocking broadcast method. This returns immediately.
        canvas_tool._broadcast_to_canvas_listeners(
            canvas_id=canvas_id, event_type=sse_event_type,
            data=sse_data, originator_user_id=user_id
        )
        return Result.ok(info=f"'{action_type}' broadcasted.")

    # --- Flow 2: Persistent actions that modify the canvas state ---
    if action_type not in ["element_add", "element_update", "element_remove"]:
        return Result.default_user_error(f"Unknown persistent action_type: {action_type}", 400)

    # Load the full, current session state from the database
    user_db_key_base = await canvas_tool._get_user_specific_db_key(request, SESSION_DATA_PREFIX)
    session_db_key = f"{user_db_key_base}_{canvas_id}"
    try:
        db_result = canvas_tool.db_mod.get(session_db_key)
        if not db_result or db_result.is_error() or not db_result.get():
            return Result.default_user_error("Canvas session not found in database.", 404)

        # DB layer may return the value wrapped in a list; unwrap before parsing.
        session_data_str = db_result.get()[0] if isinstance(db_result.get(), list) else db_result.get()
        session_data = IdeaSessionData.model_validate_json(session_data_str)
    except Exception as e:
        app.logger.error(f"DB Load/Parse failed for C:{canvas_id}. Error: {e}", exc_info=True)
        return Result.default_internal_error("Could not load canvas data to apply changes.")

    # Apply the action to the in-memory Pydantic object
    if action_type == "element_add":
        session_data.canvas_elements.append(CanvasElement(**action_payload))
    elif action_type == "element_update":
        element_id = action_payload.get("id")
        for i, el in enumerate(session_data.canvas_elements):
            if el.id == element_id:
                # Partial update: only the keys present in the payload change.
                session_data.canvas_elements[i] = el.model_copy(update=action_payload)
                break
    elif action_type == "element_remove":
        # Supports both a single "id" and a bulk "ids" list in the payload.
        ids_to_remove = set(action_payload.get("ids", [action_payload.get("id")]))
        session_data.canvas_elements = [el for el in session_data.canvas_elements if el.id not in ids_to_remove]

    # Save the modified object back to the database
    session_data.last_modified = datetime.now(UTC).timestamp()
    canvas_tool.db_mod.set(session_db_key, session_data.model_dump_json(exclude_none=True))

    # Broadcast the successful, persisted action to all connected clients
    # MODIFICATION: Call the non-blocking broadcast method.
    canvas_tool._broadcast_to_canvas_listeners(
        canvas_id=canvas_id,
        event_type="canvas_elements_changed",
        data={"action": action_type, "element": action_payload},
        originator_user_id=user_id
    )

    # Clear the temporary preview of the user who made the change
    async with canvas_tool.previews_lock:
        if user_id in canvas_tool.active_user_previews.get(canvas_id, {}):
            del canvas_tool.active_user_previews[canvas_id][user_id]

    # MODIFICATION: Call the non-blocking broadcast method.
    canvas_tool._broadcast_to_canvas_listeners(
        canvas_id=canvas_id, event_type="clear_user_preview",
        data={"user_id": user_id}, originator_user_id=user_id
    )

    return Result.ok(info=f"Action '{action_type}' persisted and broadcast.")

markdown_to_svg(self, request, markdown_text='', width=400, font_family='sans-serif', font_size=14, bg_color='#ffffff', text_color='#000000') async

Converts a string of Markdown text into an SVG image. The SVG is returned as a base64 encoded data URL. This version uses a viewBox for better scalability and multi-line handling.

Source code in toolboxv2/mods/Canvas.py
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
@export(mod_name=MOD_NAME, api=True, version=VERSION, name="markdown_to_svg", api_methods=['POST'],
        request_as_kwarg=True)
async def markdown_to_svg(self, request: RequestData, markdown_text: str = "", width: int = 400,
                          font_family: str = "sans-serif", font_size: int = 14,
                          bg_color: str = "#ffffff", text_color: str = "#000000") -> Result:
    """
    Converts a string of Markdown text into an SVG image.
    The SVG is returned as a base64 encoded data URL.
    This version uses a viewBox for better scalability and multi-line handling.

    Args:
        request: Incoming request; "markdown_text" may also come from its body.
        markdown_text: Markdown source. Falls back to request.data if empty.
        width: Target SVG width in pixels.
        font_family / font_size / bg_color / text_color: Styling applied to
            the rendered content.
            NOTE(review): these values are interpolated unescaped into the
            SVG/CSS — confirm callers are trusted or add escaping.

    Returns:
        Result: ok with {"svg_data_url", "original_markdown"}, or an error.
    """
    if request is None:
        return Result.default_user_error("Request data is missing.", 400)
    if not markdown_text and request.data:
        markdown_text = request.data.get("markdown_text", "")

    if not markdown_text:
        return Result.default_user_error("markdown_text cannot be empty.")

    try:
        # Convert Markdown to HTML
        html_content = markdown2.markdown(markdown_text, extras=["fenced-code-blocks", "tables", "strike"])

        # --- FIX for Multi-line text ---
        # The key is to NOT set a fixed height on the SVG itself, but to use a viewBox.
        # The client will determine the final rendered size.
        # The width of the div inside the foreignObject controls the line wrapping.

        # We still need a rough height for the viewBox.
        # Estimate height: (number of lines * line-height) + padding
        # A simple line-height estimate is font_size * 1.6
        line_height_estimate = font_size * 1.6
        # Rough line count: newlines plus block-level tags that start a new line.
        num_lines_estimate = len(html_content.split('\n')) + html_content.count('<br') + html_content.count(
            '<p>') + html_content.count('<li>')
        estimated_height = (num_lines_estimate * line_height_estimate) + 40  # 20px top/bottom padding

        svg_template = f"""
        <svg viewBox="0 0 {width} {int(estimated_height)}" xmlns="http://www.w3.org/2000/svg">
            <foreignObject x="0" y="0" width="{width}" height="{int(estimated_height)}">
                <div xmlns="http://www.w3.org/1999/xhtml">
                    <style>
                        div {{
                            font-family: {font_family};
                            font-size: {font_size}px;
                            color: {text_color};
                            background-color: {bg_color};
                            padding: 10px;
                            border-radius: 5px;
                            line-height: 1.6;
                            width: {width - 20}px; /* Width minus padding */
                            word-wrap: break-word;
                            height: 100%;
                            overflow-y: auto; /* Allow scrolling if content overflows estimate */
                        }}
                        h1, h2, h3 {{ border-bottom: 1px solid #ccc; padding-bottom: 5px; margin-top: 1em; }}
                        pre {{ background-color: #f0f0f0; padding: 10px; border-radius: 4px; overflow-x: auto; }}
                        code {{ font-family: monospace; }}
                        table {{ border-collapse: collapse; width: 100%; }}
                        th, td {{ border: 1px solid #ddd; padding: 8px; }}
                        th {{ background-color: #f2f2f2; }}
                        blockquote {{ border-left: 4px solid #ccc; padding-left: 10px; color: #555; margin-left: 0; }}
                    </style>
                    {html_content}
                </div>
            </foreignObject>
        </svg>
        """

        svg_base64 = base64.b64encode(svg_template.encode('utf-8')).decode('utf-8')
        data_url = f"data:image/svg+xml;base64,{svg_base64}"

        # --- FIX for Editability ---
        # Return the original markdown text along with the SVG
        return Result.ok(data={"svg_data_url": data_url, "original_markdown": markdown_text})

    except Exception as e:
        self.app.logger.error(f"Error converting Markdown to SVG: {e}", exc_info=True)
        return Result.default_internal_error("Failed to convert Markdown to SVG.")

save_session(app, request, data) async

Saves the entire state of a canvas session to the database. This is typically triggered by a user's explicit "Save" action.

Source code in toolboxv2/mods/Canvas.py
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
@export(mod_name=MOD_NAME, api=True, version=VERSION, name="save_session", api_methods=['POST'], request_as_kwarg=True)
async def save_session(app: App, request: RequestData, data: dict[str, Any] | IdeaSessionData) -> Result:
    """
    Saves the entire state of a canvas session to the database.
    This is typically triggered by a user's explicit "Save" action.

    Args:
        app: The toolbox application instance.
        request: Incoming request; used to derive the user-specific DB key.
        data: Full session state, either a raw dict or an IdeaSessionData.

    Returns:
        Result: ok with {"id", "last_modified"}, or a user/internal error.
    """
    if not data:
        return Result.default_user_error("Request data is missing.", 400)
    if request is None:
        return Result.default_user_error("Request data is missing.", 400)
    canvas_tool = app.get_mod(MOD_NAME)
    if not canvas_tool or not canvas_tool.db_mod:
        app.logger.error("Save failed: Canvas module or DB not available.")
        return Result.custom_error(info="Database module not available.", exec_code=503)

    user_db_key_base = await canvas_tool._get_user_specific_db_key(request, SESSION_DATA_PREFIX)
    if not user_db_key_base:
        return Result.default_user_error(info="User authentication required to save.", exec_code=401)

    try:
        # Validate the incoming data against the Pydantic model
        session_data_obj = IdeaSessionData(**data) if isinstance(data, dict) else data
    except Exception as e:
        app.logger.error(f"Invalid session data for save: {e}. Data: {str(data)[:500]}", exc_info=True)
        return Result.default_user_error(info=f"Invalid session data format: {e}", exec_code=400)

    # Update timestamp and construct the main session key
    # NOTE(review): the truthiness guard below is not applied to the key
    # construction on the next line — confirm session_data_obj can never be
    # falsy at this point.
    if session_data_obj:
        session_data_obj.last_modified = datetime.now(UTC).timestamp()
    session_db_key = f"{user_db_key_base}_{session_data_obj.id}"

    # Save the full session object to the database
    canvas_tool.db_mod.set(session_db_key, session_data_obj.model_dump_json(exclude_none=True))
    app.logger.info(f"Saved session data for C:{session_data_obj.id}")

    # --- Update the session list metadata ---
    session_list_key = f"{user_db_key_base}{SESSION_LIST_KEY_SUFFIX}"
    try:
        list_res_obj = canvas_tool.db_mod.get(session_list_key)
        user_sessions = []
        if list_res_obj and not list_res_obj.is_error() and list_res_obj.get():
            # DB layer may return the value wrapped in a list; unwrap first.
            list_content = list_res_obj.get()[0] if isinstance(list_res_obj.get(), list) else list_res_obj.get()
            user_sessions = json.loads(list_content)

        # Find and update the existing entry, or add a new one
        session_metadata = {
            "id": session_data_obj.id,
            "name": session_data_obj.name,
            "last_modified": session_data_obj.last_modified
        }
        found_in_list = False
        for i, sess_meta in enumerate(user_sessions):
            if sess_meta.get("id") == session_data_obj.id:
                user_sessions[i] = session_metadata
                found_in_list = True
                break
        if not found_in_list:
            user_sessions.append(session_metadata)

        canvas_tool.db_mod.set(session_list_key, json.dumps(user_sessions))
        app.logger.info(f"Updated session list for user key ending in ...{user_db_key_base[-12:]}")

    except Exception as e:
        app.logger.error(f"Failed to update session list for C:{session_data_obj.id}. Error: {e}", exc_info=True)
        # Non-fatal error; the main data was saved. We can continue.

    return Result.ok(
        info="Session saved successfully.",
        data={"id": session_data_obj.id, "last_modified": session_data_obj.last_modified}
    )

ChatModule

get_chat_ui(app)

Liefert das Haupt-HTML-UI für das Chat-Widget. Es verwendet app.web_context(), um das notwendige tbjs CSS und JS einzubinden.

Source code in toolboxv2/mods/ChatModule.py
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
@export(mod_name=Name, version=version, api=True, name="ui", row=True)
def get_chat_ui(app: App) -> Result:
    """
    Serves the main HTML UI for the chat widget.
    Uses `app.web_context()` to include the required tbjs CSS and JS.

    Returns:
        Result: an HTML result containing the widget page.
    """

    html_content = f"""
        {app.web_context()}
        <style>
            body {{
                display: flex;
                align-items: center;
                justify-content: center;
                min-height: 100vh;
                padding: 1rem;
                background-color: var(--theme-bg);
            }}
        </style>
        <main id="chat-container" style="width: 100%; height: 80vh;">
            <!-- Das Chat-Widget wird hier initialisiert -->
        </main>

        <script unsave="true">
            // Verwende TB.once, um sicherzustellen, dass das Framework vollständig initialisiert ist,
            // bevor unser Code ausgeführt wird.
            TB.once(() => {{
                const chatContainer = document.getElementById('chat-container');
                if (chatContainer && TB.ui.ChatWidget) {{
                    // Initialisiere das Chat-Widget in unserem Container
                    TB.ui.ChatWidget.init(chatContainer);

                    // Verbinde mit dem in diesem Modul definierten WebSocket-Endpunkt
                    TB.ui.ChatWidget.connect();
                }} else {{
                    console.error("Chat UI initialization failed: container or ChatWidget not found.");
                }}
            }});
        </script>
    """

    return Result.html(data=html_content)

on_chat_message(app, conn_id, session, payload) async

Wird aufgerufen, wenn eine Nachricht von einem Client empfangen wird.

Source code in toolboxv2/mods/ChatModule.py
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
async def on_chat_message(app: App, conn_id: str, session: dict, payload: dict):
    """
    Called when a message is received from a client.

    Empty messages are ignored; anything else is broadcast to every
    member of the public room, including the sender.

    Args:
        app: The toolbox application instance.
        conn_id: Connection id of the sending client.
        session: Per-connection session data; "user_name" identifies the sender.
        payload: Raw WebSocket payload; text is read from payload["data"]["message"].
    """
    username = session.get("user_name", "Anonymous")
    # (Removed a leftover bare print() that dumped the full session dict on
    # every message — debug leak; the app.print below logs what is needed.)
    message_text = payload.get("data", {}).get("message", "").strip()

    if not message_text:
        return  # Ignore empty messages

    app.print(f"WS MESSAGE from {username} ({conn_id}): {message_text}")

    # Broadcast to everyone in the room (including the sender).
    await app.ws_broadcast(
        channel_id="ChatModule/public_room",
        payload={"event": "new_message", "data": {"user": username, "text": message_text}}
    )

on_user_connect(app, conn_id, session) async

Wird vom Rust WebSocket Actor aufgerufen, wenn ein neuer Client eine Verbindung herstellt.

Source code in toolboxv2/mods/ChatModule.py
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
async def on_user_connect(app: App, conn_id: str, session: dict):
    """
    Invoked by the Rust WebSocket actor whenever a new client connects.

    Sends a private welcome message to the new client, then announces the
    arrival to everyone else in the public room (sender excluded).
    """
    username = session.get("user_name", "Anonymous")
    app.print(f"WS CONNECT: User '{username}' connected with conn_id: {conn_id}")

    # 1-to-1: greet the newly connected user directly.
    welcome = {"event": "welcome", "data": f"Welcome to the public chat, {username}!"}
    await app.ws_send(conn_id, welcome)

    # 1-to-n: tell everyone else; source_conn_id excludes the sender.
    announcement = {"event": "user_joined", "data": f"👋 {username} has joined the chat."}
    await app.ws_broadcast(
        channel_id="ChatModule/public_room",
        payload=announcement,
        source_conn_id=conn_id
    )

on_user_disconnect(app, conn_id, session=None) async

Wird aufgerufen, wenn die Verbindung eines Clients geschlossen wird.

Source code in toolboxv2/mods/ChatModule.py
59
60
61
62
63
64
65
66
67
68
69
70
71
72
async def on_user_disconnect(app: App, conn_id: str, session: dict=None):
    """
    Invoked when a client's connection is closed.

    Announces the departure to every remaining member of the public room.
    """
    # session may be omitted entirely on abnormal disconnects.
    if session is None:
        session = {}
    username = session.get("user_name", "Anonymous")
    app.print(f"WS DISCONNECT: User '{username}' disconnected (conn_id: {conn_id})")

    # Tell everyone still in the room that this user has left.
    farewell = {"event": "user_left", "data": f"😥 {username} has left the chat."}
    await app.ws_broadcast(channel_id="ChatModule/public_room", payload=farewell)

register_chat_handlers(app)

Registriert die asynchronen Funktionen als Handler für spezifische WebSocket-Ereignisse. Der Funktionsname (register_chat_handlers) ist beliebig. Der Decorator ist entscheidend.

Returns:

Type Description
dict

Ein Dictionary, das Ereignisnamen auf ihre Handler-Funktionen abbildet.

Source code in toolboxv2/mods/ChatModule.py
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
@export(mod_name=Name, version=version, websocket_handler="public_room")
def register_chat_handlers(app: App) -> dict:
    """
    Register the async functions as handlers for specific WebSocket events.

    The function name (`register_chat_handlers`) itself is arbitrary; the
    decorator is what wires these handlers to the "public_room" channel.

    Returns:
        A dictionary mapping event names to their handler functions.
    """
    event_map = dict(
        on_connect=on_user_connect,
        on_message=on_chat_message,
        on_disconnect=on_user_disconnect,
    )
    return event_map

CloudM

check_multiple_processes(pids)

Checks the status of multiple processes in a single system call. Returns a dictionary mapping PIDs to their status (GREEN_CIRCLE, RED_CIRCLE, or YELLOW_CIRCLE).

Source code in toolboxv2/mods/CloudM/mini.py
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
def check_multiple_processes(pids: list[int]) -> dict[int, str]:
    """
    Checks the status of multiple processes in a single system call.
    Returns a dictionary mapping PIDs to their status (GREEN_CIRCLE, RED_CIRCLE, or YELLOW_CIRCLE).
    """
    if not pids:
        return {}

    pid_status = {}

    if os.name == 'nt':  # Windows
        try:
            # Windows tasklist requires separate /FI for each filter
            command = 'tasklist'

            # Add encoding handling for Windows
            result = subprocess.run(
                command,
                capture_output=True,
                text=True,
                shell=True,
                encoding='cp850'  # Use cp850 for Windows console output
            )
            # Create a set of running PIDs from the output
            running_pids = set()
            for line in result.stdout.lower().split('\n'):
                for pid in pids:
                    if str(pid) in line:
                        running_pids.add(pid)
            # Assign status based on whether PID was found in output
            for pid in pids:
                if pid in running_pids:
                    pid_status[pid] = GREEN_CIRCLE
                else:
                    pid_status[pid] = RED_CIRCLE

        except subprocess.SubprocessError as e:
            print(f"SubprocessError: {e}")  # For debugging
            # Mark all as YELLOW_CIRCLE if there's an error running the command
            for pid in pids:
                pid_status[pid] = YELLOW_CIRCLE
        except UnicodeDecodeError as e:
            print(f"UnicodeDecodeError: {e}")  # For debugging
            # Try alternate encoding if cp850 fails
            try:
                result = subprocess.run(
                    command,
                    capture_output=True,
                    text=True,
                    shell=True,
                    encoding='utf-8'
                )
                running_pids = set()
                for line in result.stdout.lower().split('\n'):
                    for pid in pids:
                        if str(pid) in line:
                            running_pids.add(pid)

                for pid in pids:
                    pid_status[pid] = GREEN_CIRCLE if pid in running_pids else RED_CIRCLE
            except Exception as e:
                print(f"Failed with alternate encoding: {e}")  # For debugging
                for pid in pids:
                    pid_status[pid] = YELLOW_CIRCLE

    else:  # Unix/Linux/Mac
        try:
            pids_str = ','.join(str(pid) for pid in pids)
            command = f'ps -p {pids_str} -o pid='

            result = subprocess.run(
                command,
                capture_output=True,
                text=True,
                shell=True,
                encoding='utf-8'
            )
            running_pids = set(int(pid) for pid in result.stdout.strip().split())

            for pid in pids:
                pid_status[pid] = GREEN_CIRCLE if pid in running_pids else RED_CIRCLE

        except subprocess.SubprocessError as e:
            print(f"SubprocessError: {e}")  # For debugging
            for pid in pids:
                pid_status[pid] = YELLOW_CIRCLE

    return pid_status

get_service_pids(info_dir)

Extracts service names and PIDs from pid files.

Source code in toolboxv2/mods/CloudM/mini.py
14
15
16
17
18
19
20
21
22
23
24
25
26
27
def get_service_pids(info_dir):
    """Map 'service - type' display names to the PIDs stored in *.pid files.

    Scans *info_dir* for files matching '<type>-<name>.pid', reads the
    integer PID each one contains, and returns a dict keyed by
    '<name> - <type>'.
    """
    pattern = re.compile(r'(.+)-(.+)\.pid')
    result = {}
    for entry in os.listdir(info_dir):
        matched = pattern.match(entry)
        if matched is None:
            continue
        kind, name = matched.groups()
        with open(os.path.join(info_dir, entry)) as fh:
            raw = fh.read()
        result[f"{name} - {kind}"] = int(raw.strip())
    return result

get_service_status(dir)

Displays the status of all services.

Source code in toolboxv2/mods/CloudM/mini.py
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
def get_service_status(dir: str) -> str:
    """Render one status line per known service.

    The service->PID map is cached for 30 seconds in the module-level
    ``services_data_sto`` / ``services_data_sto_last_update_time`` stores;
    the rendered string is mirrored into ``services_data_display``.
    """
    now = time.time()
    if now - services_data_sto_last_update_time[0] > 30:
        # Cache expired: rescan the pid files.
        services = get_service_pids(dir)
        services_data_sto[0] = services
        services_data_sto_last_update_time[0] = now
    else:
        services = services_data_sto[0]

    if not services:
        return "No services found"

    # One batched liveness check for every PID.
    statuses = check_multiple_processes(list(services.values()))

    separator = "\n" if len(services) > 1 else ' '
    lines = [
        f"{statuses.get(pid, YELLOW_CIRCLE)} {name} (PID: {pid})"
        for name, pid in services.items()
    ]
    res_s = "Service(s):" + separator + "\n".join(lines) + "\n"
    services_data_display[0] = res_s.strip()
    return res_s.rstrip()

AuthManager

delete_user(app, username)

Deletes a user and all their data.

Source code in toolboxv2/mods/CloudM/AuthManager.py
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
@export(mod_name=Name, state=True, test=False, interface=ToolBoxInterfaces.native)
def delete_user(app: App, username: str):
    """Deletes a user and all their data.

    Args:
        app: Running App instance used for DB access.
        username: Name of the user to remove.

    Returns:
        Result: ok on success, user error if the user does not exist,
        internal error (with the DB result attached) if deletion failed.
    """
    # Fail fast if the user is not present in the DB.
    if not db_helper_test_exist(app, username):
        return Result.default_user_error(f"User '{username}' not found.")

    # This will delete all entries matching the user
    result = db_helper_delete_user(app, username, '*', matching=True)

    if result.is_ok():
        # Also remove the local private key file if it exists
        app.config_fh.remove_key_file_handler("Pk" + Code.one_way_hash(username, "dvp-k")[:8])
        return Result.ok(f"User '{username}' deleted successfully.")
    else:
        return Result.default_internal_error(f"Failed to delete user '{username}'.", data=result)
list_users(app)

Lists all registered users.

Source code in toolboxv2/mods/CloudM/AuthManager.py
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
@export(mod_name=Name, state=True, test=False, interface=ToolBoxInterfaces.native)
def list_users(app: App):
    """Lists all registered users.

    Scans all DB keys, keeps those of the form ``USER::<username>::<uid>``
    and resolves each distinct username once.

    Args:
        app: Running App instance used for DB access.

    Returns:
        Result: ok with a list of {username, email, level} dicts, ok with a
        message when no users exist, or the DB error result if the key scan
        failed.
    """
    keys_result = app.run_any(TBEF.DB.GET, query="all-k", get_results=True)
    if keys_result.is_error():
        return keys_result

    user_keys = keys_result.get()
    if not user_keys:
        return Result.ok("No users found.")

    users = []
    seen = set()  # resolved usernames; O(1) lookup instead of rescanning `users`
    for key in user_keys:
        if isinstance(key, bytes):
            key = key.decode()
        if not key.startswith("USER::"):
            continue
        # Extract username from the key USER::username::uid
        parts = key.split('::')
        if len(parts) > 1 and parts[1] not in seen:
            user_res = get_user_by_name(app, parts[1])
            if user_res.is_ok():
                user_data = user_res.get()
                seen.add(user_data.name)
                users.append({"username": user_data.name, "email": user_data.email, "level": user_data.level})

    return Result.ok(data=users)

ModManager

create_and_pack_module(path, module_name='', version='-.-.-', additional_dirs=None, yaml_data=None)

Erstellt ein Python-Modul und packt es in eine ZIP-Datei.

Parameters:

Name Type Description Default
path str

Pfad zum Ordner oder zur Datei, die in das Modul aufgenommen werden soll.

required
additional_dirs dict

Zusätzliche Verzeichnisse, die hinzugefügt werden sollen.

None
version str

Version des Moduls.

'-.-.-'
module_name str

Name des Moduls.

''

Returns:

Name Type Description
str

Pfad zur erstellten ZIP-Datei.

Source code in toolboxv2/mods/CloudM/ModManager.py
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
def create_and_pack_module(path, module_name='', version='-.-.-', additional_dirs=None, yaml_data=None):
    """
    Create a Python module and pack it into a ZIP file.

    Args:
        path (str): Path to the folder (or parent of the file) that should
            become the module.
        module_name (str): Name of the module.
        version (str): Version of the module.
        additional_dirs (dict): Extra directories (name -> path or list of
            paths) to bundle alongside the module.
        yaml_data (dict): Extra key/value pairs merged into the generated
            config YAML.

    Returns:
        str | bool: Path to the created ZIP file, or False if the module
        source could not be found.
    """
    if additional_dirs is None:
        additional_dirs = {}
    if yaml_data is None:
        yaml_data = {}

    os.makedirs("./mods_sto/temp/", exist_ok=True)

    module_path = os.path.join(path, module_name)
    # A module may be a package directory or a single <name>.py file.
    if not os.path.exists(module_path):
        module_path += '.py'

    temp_dir = tempfile.mkdtemp(dir=os.path.join("./mods_sto", "temp"))
    zip_file_name = f"RST${module_name}&{__version__}§{version}.zip"
    zip_path = f"./mods_sto/{zip_file_name}"

    if not os.path.exists(module_path):
        # Neither a directory nor a .py file was found.
        return False

    if os.path.isdir(module_path):
        # Write tbConfig.yaml and requirements.txt into the source directory,
        # then stage the whole package.
        config_path = os.path.join(module_path, "tbConfig.yaml")
        with open(config_path, 'w') as config_file:
            yaml.dump({"version": version, "module_name": module_name,
                       "dependencies_file": f"./mods/{module_name}/requirements.txt",
                       "zip": zip_file_name, **yaml_data}, config_file)

        generate_requirements(module_path, os.path.join(module_path, "requirements.txt"))
        shutil.copytree(module_path, os.path.join(temp_dir, os.path.basename(module_path)), dirs_exist_ok=True)
    else:
        # Single-file module: copy it and generate config/requirements
        # directly in the staging area.
        shutil.copy2(module_path, temp_dir)
        config_path = os.path.join(temp_dir, f"{module_name}.yaml")
        with open(config_path, 'w') as config_file:
            yaml.dump({"version": version, "dependencies_file": f"./mods/{module_name}/requirements.txt",
                       "module_name": module_name, **yaml_data}, config_file)
        generate_requirements(temp_dir, os.path.join(temp_dir, "requirements.txt"))

    # Bundle any additional directories/files requested by the caller.
    for dir_name, dir_paths in additional_dirs.items():
        if isinstance(dir_paths, str):
            dir_paths = [dir_paths]
        for dir_path in dir_paths:
            full_path = os.path.join(temp_dir, dir_name)
            if os.path.isdir(dir_path):
                shutil.copytree(dir_path, full_path, dirs_exist_ok=True)
            elif os.path.isfile(dir_path):
                # Ensure the target directory exists, then copy the file.
                os.makedirs(full_path, exist_ok=True)
                shutil.copy2(dir_path, full_path)
            else:
                print(f"Der Pfad {dir_path} ist weder ein Verzeichnis noch eine Datei.")

    # Pack the staged content into the ZIP archive.
    with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
        for root, _dirs, files in os.walk(temp_dir):
            for file in files:
                file_path = os.path.join(root, file)
                zipf.write(file_path, os.path.relpath(file_path, temp_dir))

    # Remove the temporary staging directory.
    shutil.rmtree(temp_dir)

    return zip_path
download_files(urls, directory, desc, print_func, filename=None)

Hilfsfunktion zum Herunterladen von Dateien.

Source code in toolboxv2/mods/CloudM/ModManager.py
72
73
74
75
76
77
78
79
80
81
def download_files(urls, directory, desc, print_func, filename=None):
    """Download each URL in *urls* into *directory*.

    Args:
        urls (list[str]): URLs to fetch.
        directory (str): Target directory (created if missing).
        desc (str): Progress-bar description passed to tqdm.
        print_func (callable): Logger used for progress messages.
        filename (str | None): Fixed target file name; when None the name is
            derived from each URL individually.

    Returns:
        str | None: Path of the last downloaded file, or None when *urls*
        is empty.
    """
    target = None
    for url in tqdm(urls, desc=desc):
        # Derive the name per URL (instead of freezing the first one) so
        # several URLs no longer overwrite the same file.
        name = filename if filename is not None else os.path.basename(url)
        target = f"{directory}/{name}"
        print_func(f"Download {name}")
        print_func(f"{url} -> {target}")
        os.makedirs(directory, exist_ok=True)
        urllib.request.urlretrieve(url, target)
    return target
handle_requirements(requirements_url, module_name, print_func)

Verarbeitet und installiert Requirements.

Source code in toolboxv2/mods/CloudM/ModManager.py
84
85
86
87
88
89
90
91
92
93
94
95
def handle_requirements(requirements_url, module_name, print_func):
    """Download and install a module's requirements file.

    Args:
        requirements_url (str): URL of the requirements.txt; a falsy value
            skips the whole step.
        module_name (str): Used to name the temporary requirements file.
        print_func (callable): Logger for progress messages.
    """
    if requirements_url:
        requirements_filename = f"{module_name}-requirements.txt"
        print_func(f"Download requirements {requirements_filename}")
        urllib.request.urlretrieve(requirements_url, requirements_filename)

        print_func("Install requirements")
        # Install into the currently running interpreter's environment.
        run_command(
            [sys.executable, "-m", "pip", "install", "-r", requirements_filename])

        # Remove the temporary file once installation is done.
        os.remove(requirements_filename)
increment_version(version_str, max_value=99)

Inkrementiert eine Versionsnummer im Format "vX.Y.Z".

Parameters:

Name Type Description Default
version_str str

Die aktuelle Versionsnummer, z. B. "v0.0.1".

required
max_value int

Die maximale Zahl pro Stelle (default: 99).

99

Returns:

Name Type Description
str str

Die inkrementierte Versionsnummer.

Source code in toolboxv2/mods/CloudM/ModManager.py
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
def increment_version(version_str: str, max_value: int = 99) -> str:
    """Increment a version string of the form "vX.Y.Z".

    The last position is incremented; positions exceeding *max_value* roll
    over to 0 and carry into the next-higher position. If every position
    overflows, a new leading "1" is prepended.

    Args:
        version_str: Current version, e.g. "v0.0.1" (must start with "v").
        max_value: Maximum value per position (default 99).

    Returns:
        The incremented version string.

    Raises:
        ValueError: If the string does not start with "v" or the remainder
            is not a valid version number.
    """
    if not version_str.startswith("v"):
        raise ValueError("Die Versionsnummer muss mit 'v' beginnen, z. B. 'v0.0.1'.")

    core = version_str[1:]
    try:
        parsed = Version(core)
    except ValueError as e:
        raise ValueError(f"Ungültige Versionsnummer: {core}") from e

    digits = list(parsed.release)

    # Carry-propagating increment starting at the least significant digit.
    idx = len(digits) - 1
    while idx >= 0:
        if digits[idx] < max_value:
            digits[idx] += 1
            break
        digits[idx] = 0
        idx -= 1
    else:
        # Every position overflowed — grow the version by one position.
        digits.insert(0, 1)

    return "v" + ".".join(str(d) for d in digits)
installer(app, module_name, build_state=True) async

Installiert oder aktualisiert ein Modul basierend auf der Remote-Version.

Source code in toolboxv2/mods/CloudM/ModManager.py
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
@export(mod_name=Name, name="install", test=False)
async def installer(app: App | None, module_name: str, build_state=True):
    """
    Install or update a module based on the remote version.

    Compares the highest locally available ZIP version against the version
    the server reports, and downloads/installs the remote ZIP when newer.

    Args:
        app: Running App instance; resolved via get_app() when None.
        module_name: Name of the module to install/update.
        build_state: When True, rebuild the app state after installing.

    Returns:
        Result: user error on auth/download failure, the install report on
        success, or Result.ok() when the module is already up to date.
    """
    if app is None:
        app = get_app(f"{Name}.installer")

    # Installation requires an authenticated session.
    if not app.session.valid and not await app.session.login():
        return Result.default_user_error("Please login with CloudM login")

    # Ask the server only for the highest available version.
    response = await app.session.fetch(f"/api/{Name}/getModVersion?module_name={module_name}", method="GET")
    remote_version: str = await response.text()
    if remote_version == "None":
        remote_version = None
    # Find the local version.
    local_version = find_highest_zip_version(
        module_name, version_only=True
    )

    if not local_version and not remote_version:
        return Result.default_user_error(f"404 mod {module_name} not found")

    # Compare versions; a missing local version counts as 0.0.0.
    # NOTE(review): pv.parse(None) raises when remote_version is None while a
    # local version exists — confirm the server always reports a version here.
    local_ver = pv.parse(local_version) if local_version else pv.parse("0.0.0")
    remote_ver = pv.parse(remote_version)

    app.print(f"Mod versions - Local: {local_ver}, Remote: {remote_ver}")

    if remote_ver > local_ver:
        # Build the download URL directly from module name and version.
        download_path = Path(app.start_dir) / 'mods_sto'

        app.print(f"Fetching Mod from {app.session.base}/api/{Name}/download_mod?module_name={module_name}")
        if not await app.session.download_file(f"/api/{Name}/download_mod?module_name={module_name}", str(download_path)):
            app.print("Failed to download mod")
            # Last resort: let the operator place the ZIP manually.
            if 'y' not in input("Download manually and place in mods_sto folder. Done? (y/n) ").lower():
                return Result.default_user_error("Installation cancelled")

        # Expected ZIP naming convention: RST$<mod>&<app-version>§<mod-version>.zip
        zip_name = f"RST${module_name}&{app.version}§{remote_version}.zip"

        with Spinner("Installing from zip"):
            report = install_from_zip(app, zip_name)

        if not report:
            return Result.default_user_error("Setup error occurred")

        if build_state:
            get_state_from_app(app)

        return report

    app.print("Module is already up to date")
    return Result.ok()
run_command(command, cwd=None)

Führt einen Befehl aus und gibt den Output zurück.

Source code in toolboxv2/mods/CloudM/ModManager.py
519
520
521
522
523
def run_command(command, cwd=None):
    """Run *command* and return its captured stdout.

    Output is decoded as cp850 (the Windows console code page).

    Raises:
        subprocess.CalledProcessError: On a non-zero exit status.
    """
    completed = subprocess.run(
        command,
        cwd=cwd,
        capture_output=True,
        text=True,
        check=True,
        encoding='cp850',
    )
    return completed.stdout
uninstall_module(path, module_name='', version='-.-.-', additional_dirs=None, yaml_data=None)

Deinstalliert ein Python-Modul, indem es das Modulverzeichnis oder die ZIP-Datei entfernt.

Parameters:

Name Type Description Default
path str

Pfad zum Ordner oder zur Datei, die in das Modul aufgenommen werden soll.

required
additional_dirs dict

Zusätzliche Verzeichnisse, die hinzugefügt werden sollen.

None
version str

Version des Moduls.

'-.-.-'
module_name str

Name des Moduls.

''
Source code in toolboxv2/mods/CloudM/ModManager.py
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
def uninstall_module(path, module_name='', version='-.-.-', additional_dirs=None, yaml_data=None):
    """
    Uninstall a Python module by removing its directory and its packed ZIP.

    Args:
        path (str): Path whose parent directory contains the module.
        module_name (str): Name of the module.
        version (str): Version of the module (used to locate the ZIP file).
        additional_dirs (dict): Extra directories to remove as well.
        yaml_data (dict): Unused; kept for signature symmetry with
            create_and_pack_module.

    Returns:
        bool | None: False when the module was already uninstalled,
        otherwise None.
    """
    if additional_dirs is None:
        additional_dirs = {}
    if yaml_data is None:
        yaml_data = {}

    os.makedirs("./mods_sto/temp/", exist_ok=True)

    base_path = os.path.dirname(path)
    module_path = os.path.join(base_path, module_name)
    zip_path = f"./mods_sto/RST${module_name}&{__version__}§{version}.zip"

    if not os.path.exists(module_path):
        # Fixed: the original printed the bare "%s" format string unformatted.
        print(f"Module {module_name} already uninstalled")
        return False

    # Remove the installed module directory.
    shutil.rmtree(module_path)

    # Remove any additional directories installed alongside the module.
    for _dir_name, dir_paths in additional_dirs.items():
        if isinstance(dir_paths, str):
            dir_paths = [dir_paths]
        for dir_path in dir_paths:
            shutil.rmtree(dir_path)
            print(f"Der Pfad {dir_path} wurde entfernt")

    # Remove the packed ZIP file. Fixed: rmtree cannot delete a plain file
    # (it raises NotADirectoryError); use os.remove, guarded for absence.
    if os.path.isfile(zip_path):
        os.remove(zip_path)
unpack_and_move_module(zip_path, base_path='./mods', module_name='')

Entpackt eine ZIP-Datei und verschiebt die Inhalte an die richtige Stelle. Überschreibt existierende Dateien für Update-Unterstützung.

Parameters:

Name Type Description Default
zip_path str

Pfad zur ZIP-Datei, die entpackt werden soll

required
base_path str

Basispfad, unter dem das Modul gespeichert werden soll

'./mods'
module_name str

Name des Moduls (optional, wird sonst aus ZIP-Namen extrahiert)

''

Returns:

Name Type Description
str str

Name des installierten Moduls

Source code in toolboxv2/mods/CloudM/ModManager.py
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
def unpack_and_move_module(zip_path: str, base_path: str = './mods', module_name: str = '') -> str:
    """
    Unpack a ZIP file and move its contents into place.
    Overwrites existing files to support updates.

    Args:
        zip_path (str): Path of the ZIP file to unpack
        base_path (str): Base path under which the module is stored
        module_name (str): Module name (optional; otherwise extracted from the ZIP name)

    Returns:
        str: Name of the installed module

    Raises:
        Exception: Re-raises any error after removing the partially
            installed module directory.
    """
    # Convert paths to Path objects for easier handling.
    zip_path = Path(zip_path)
    base_path = Path(base_path)

    # Derive the module name from the "RST$<name>&..." convention if not given.
    if not module_name:
        module_name = zip_path.name.split('$')[1].split('&')[0]

    module_path = base_path / module_name
    temp_base = Path('./mods_sto/temp')

    try:
        # Create the temporary staging directory.
        temp_base.mkdir(parents=True, exist_ok=True)
        with tempfile.TemporaryDirectory(dir=str(temp_base)) as temp_dir:
            temp_dir = Path(temp_dir)

            with Spinner(f"Extracting {zip_path.name}"):
                # Extract the ZIP archive into the staging directory.
                with zipfile.ZipFile(zip_path, 'r') as zip_ref:
                    zip_ref.extractall(temp_dir)

            # Handle the module directory itself.
            source_module = temp_dir / module_name
            if source_module.exists():
                with Spinner(f"Installing module to {module_path}"):
                    if module_path.exists():
                        # Delete the existing module directory for a clean update.
                        shutil.rmtree(module_path)
                    # Move the new module directory into place.
                    shutil.copytree(source_module, module_path, dirs_exist_ok=True)

            # Handle additional files at the archive root.
            with Spinner("Installing additional files"):
                for item in temp_dir.iterdir():
                    if item.name == module_name:
                        continue

                    target = Path('./') / item.name
                    if item.is_dir():
                        with Spinner(f"Installing directory {item.name}"):
                            if target.exists():
                                shutil.rmtree(target)
                            shutil.copytree(item, target, dirs_exist_ok=True)
                    else:
                        with Spinner(f"Installing file {item.name}"):
                            shutil.copy2(item, target)

            print(f"Successfully installed/updated module {module_name} to {module_path}")
            return module_name

    except Exception as e:
        print(f"Error during installation: {str(e)}")
        # Cleanup on failure: remove the partially installed module.
        if module_path.exists():
            shutil.rmtree(module_path)
        raise

ModManager_tests

TestModManager

Bases: TestCase

Source code in toolboxv2/mods/CloudM/ModManager_tests.py
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
class TestModManager(unittest.TestCase):
    """Unit tests for ModManager's version/pack/unpack/install helpers.

    Each test runs inside a fresh temporary directory (see setUp) because
    the ModManager functions operate on relative paths such as './mods'
    and './mods_sto'.
    """

    # Injected by run_mod_manager_tests before the suite is executed.
    app: App = None

    def test_increment_version(self):
        """Tests the version increment logic."""
        print("\nTesting increment_version...")
        self.assertEqual(increment_version("v0.0.1"), "v0.0.2")
        self.assertEqual(increment_version("v0.0.99", max_value=99), "v0.1.0")
        self.assertEqual(increment_version("v0.99.99", max_value=99), "v1.0.0")
        self.assertEqual(increment_version("v98"), "v99")
        with self.assertRaises(ValueError, msg="Should fail if 'v' is missing"):
            print(increment_version("0.0.1"))
        print("increment_version tests passed.")

    def setUp(self):
        """Set up a temporary environment for each test."""
        self.original_cwd = os.getcwd()
        self.test_dir = tempfile.mkdtemp(prefix="mod_manager_test_")

        # The functions in ModManager use relative paths like './mods' and './mods_sto'
        # We'll create these inside our temp directory and chdir into it.
        os.chdir(self.test_dir)
        os.makedirs("mods", exist_ok=True)
        os.makedirs("mods_sto", exist_ok=True)
        os.makedirs("source_module", exist_ok=True)

    def tearDown(self):
        """Clean up the temporary environment after each test."""
        # Restore the CWD first; the temp dir cannot be removed while current.
        os.chdir(self.original_cwd)
        shutil.rmtree(self.test_dir, ignore_errors=True)

    def test_create_pack_unpack_cycle(self):
        """Tests the full cycle of creating, packing, and unpacking a module."""
        print("\nTesting create_pack_unpack_cycle...")
        module_name = "MyTestMod"
        module_version = "v0.1.0"

        # 1. Create a dummy module structure inside the temp 'source_module' dir
        source_path = Path("source_module")
        module_source_path = source_path / module_name
        module_source_path.mkdir()
        (module_source_path / "main.py").write_text("print('hello from my test mod')")
        (module_source_path / "data.txt").write_text("some test data")

        # 2. Call create_and_pack_module
        # The 'path' argument is the parent directory of the module directory.
        zip_path_str = create_and_pack_module(
            path=str(source_path),
            module_name=module_name,
            version=module_version
        )
        self.assertTrue(zip_path_str, "create_and_pack_module should return a path.")
        zip_path = Path(zip_path_str)

        # 3. Assert the zip file was created in the correct location ('./mods_sto')
        self.assertTrue(zip_path.exists(), f"Zip file should exist at {zip_path}")
        self.assertEqual(zip_path.parent.name, "mods_sto")

        # 4. Call unpack_and_move_module
        # We unpack into the './mods' directory.
        unpacked_name = unpack_and_move_module(
            zip_path=str(zip_path),
            base_path="mods"
        )

        # 5. Assert the module was unpacked correctly
        self.assertEqual(unpacked_name, module_name)
        unpacked_dir = Path("mods") / module_name
        self.assertTrue(unpacked_dir.is_dir(), "Unpacked module directory should exist.")

        # Verify content
        self.assertTrue((unpacked_dir / "main.py").exists())
        self.assertEqual((unpacked_dir / "main.py").read_text(), "print('hello from my test mod')")
        self.assertTrue((unpacked_dir / "data.txt").exists())
        self.assertEqual((unpacked_dir / "data.txt").read_text(), "some test data")

        # Verify that the tbConfig.yaml was created and has correct info
        config_path = unpacked_dir / "tbConfig.yaml"
        self.assertTrue(config_path.exists())
        with open(config_path) as f:
            config = yaml.safe_load(f)
        self.assertEqual(config.get("module_name"), module_name)
        self.assertEqual(config.get("version"), module_version)

        print("create_pack_unpack_cycle tests passed.")

    def test_install_from_zip(self):
        """Tests the install_from_zip helper function."""
        print("\nTesting install_from_zip...")
        module_name = "MyInstallTestMod"
        module_version = "v0.1.1"

        # 1. Create a dummy module and zip it
        source_path = Path("source_module")
        module_source_path = source_path / module_name
        module_source_path.mkdir()
        (module_source_path / "main.py").write_text("pass")
        zip_path_str = create_and_pack_module(
            path=str(source_path),
            module_name=module_name,
            version=module_version
        )
        zip_path = Path(zip_path_str)
        zip_name = zip_path.name

        # 2. Mock the app object needed by install_from_zip
        # (a lambda serves as a minimal attribute container standing in for App)
        mock_app = lambda :None
        mock_app.start_dir = self.test_dir

        # 3. Call install_from_zip
        result = install_from_zip(mock_app, zip_name, no_dep=True)

        # 4. Assert the installation was successful
        self.assertTrue(result)
        unpacked_dir = Path("mods") / module_name
        self.assertTrue(unpacked_dir.is_dir())
        self.assertTrue((unpacked_dir / "main.py").exists())
        print("install_from_zip tests passed.")
setUp()

Set up a temporary environment for each test.

Source code in toolboxv2/mods/CloudM/ModManager_tests.py
58
59
60
61
62
63
64
65
66
67
68
def setUp(self):
    """Create and enter a fresh temporary working directory for each test."""
    self.original_cwd = os.getcwd()
    self.test_dir = tempfile.mkdtemp(prefix="mod_manager_test_")

    # ModManager's functions rely on relative paths ('./mods', './mods_sto'),
    # so run every test from inside its own temp directory.
    os.chdir(self.test_dir)
    for subdir in ("mods", "mods_sto", "source_module"):
        os.makedirs(subdir, exist_ok=True)
tearDown()

Clean up the temporary environment after each test.

Source code in toolboxv2/mods/CloudM/ModManager_tests.py
70
71
72
73
def tearDown(self):
    """Clean up the temporary environment after each test."""
    # Restore the CWD first; the temp dir cannot be removed while current.
    os.chdir(self.original_cwd)
    shutil.rmtree(self.test_dir, ignore_errors=True)
test_create_pack_unpack_cycle()

Tests the full cycle of creating, packing, and unpacking a module.

Source code in toolboxv2/mods/CloudM/ModManager_tests.py
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
def test_create_pack_unpack_cycle(self):
    """Tests the full cycle of creating, packing, and unpacking a module.

    Covers create_and_pack_module -> unpack_and_move_module and verifies
    the unpacked file contents plus the generated tbConfig.yaml metadata.
    """
    print("\nTesting create_pack_unpack_cycle...")
    module_name = "MyTestMod"
    module_version = "v0.1.0"

    # 1. Create a dummy module structure inside the temp 'source_module' dir
    source_path = Path("source_module")
    module_source_path = source_path / module_name
    module_source_path.mkdir()
    (module_source_path / "main.py").write_text("print('hello from my test mod')")
    (module_source_path / "data.txt").write_text("some test data")

    # 2. Call create_and_pack_module
    # The 'path' argument is the parent directory of the module directory.
    zip_path_str = create_and_pack_module(
        path=str(source_path),
        module_name=module_name,
        version=module_version
    )
    self.assertTrue(zip_path_str, "create_and_pack_module should return a path.")
    zip_path = Path(zip_path_str)

    # 3. Assert the zip file was created in the correct location ('./mods_sto')
    self.assertTrue(zip_path.exists(), f"Zip file should exist at {zip_path}")
    self.assertEqual(zip_path.parent.name, "mods_sto")

    # 4. Call unpack_and_move_module
    # We unpack into the './mods' directory.
    unpacked_name = unpack_and_move_module(
        zip_path=str(zip_path),
        base_path="mods"
    )

    # 5. Assert the module was unpacked correctly
    self.assertEqual(unpacked_name, module_name)
    unpacked_dir = Path("mods") / module_name
    self.assertTrue(unpacked_dir.is_dir(), "Unpacked module directory should exist.")

    # Verify content
    self.assertTrue((unpacked_dir / "main.py").exists())
    self.assertEqual((unpacked_dir / "main.py").read_text(), "print('hello from my test mod')")
    self.assertTrue((unpacked_dir / "data.txt").exists())
    self.assertEqual((unpacked_dir / "data.txt").read_text(), "some test data")

    # Verify that the tbConfig.yaml was created and has correct info
    config_path = unpacked_dir / "tbConfig.yaml"
    self.assertTrue(config_path.exists())
    with open(config_path) as f:
        config = yaml.safe_load(f)
    self.assertEqual(config.get("module_name"), module_name)
    self.assertEqual(config.get("version"), module_version)

    print("create_pack_unpack_cycle tests passed.")
test_increment_version()

Tests the version increment logic.

Source code in toolboxv2/mods/CloudM/ModManager_tests.py
47
48
49
50
51
52
53
54
55
56
def test_increment_version(self):
    """Tests the version increment logic."""
    print("\nTesting increment_version...")
    # Table of (args, kwargs, expected) cases covering simple increment,
    # single-digit carry, cascading carry, and a one-component version.
    cases = [
        (("v0.0.1",), {}, "v0.0.2"),
        (("v0.0.99",), {"max_value": 99}, "v0.1.0"),
        (("v0.99.99",), {"max_value": 99}, "v1.0.0"),
        (("v98",), {}, "v99"),
    ]
    for args, kwargs, expected in cases:
        self.assertEqual(increment_version(*args, **kwargs), expected)
    with self.assertRaises(ValueError, msg="Should fail if 'v' is missing"):
        print(increment_version("0.0.1"))
    print("increment_version tests passed.")
test_install_from_zip()

Tests the install_from_zip helper function.

Source code in toolboxv2/mods/CloudM/ModManager_tests.py
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
def test_install_from_zip(self):
    """Tests the install_from_zip helper function."""
    print("\nTesting install_from_zip...")
    module_name = "MyInstallTestMod"
    module_version = "v0.1.1"

    # 1. Create a dummy module and zip it
    source_path = Path("source_module")
    module_source_path = source_path / module_name
    module_source_path.mkdir()
    (module_source_path / "main.py").write_text("pass")
    zip_path_str = create_and_pack_module(
        path=str(source_path),
        module_name=module_name,
        version=module_version
    )
    zip_path = Path(zip_path_str)
    zip_name = zip_path.name

    # 2. Mock the app object needed by install_from_zip
    # (a lambda serves as a minimal attribute container standing in for App)
    mock_app = lambda :None
    mock_app.start_dir = self.test_dir

    # 3. Call install_from_zip
    result = install_from_zip(mock_app, zip_name, no_dep=True)

    # 4. Assert the installation was successful
    self.assertTrue(result)
    unpacked_dir = Path("mods") / module_name
    self.assertTrue(unpacked_dir.is_dir())
    self.assertTrue((unpacked_dir / "main.py").exists())
    print("install_from_zip tests passed.")
run_mod_manager_tests(app)

This function will be automatically discovered and run by the test runner. It uses the standard unittest framework to run tests.

Source code in toolboxv2/mods/CloudM/ModManager_tests.py
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
@export(test_only=True)
def run_mod_manager_tests(app: App):
    """
    This function will be automatically discovered and run by the test runner.
    It uses the standard unittest framework to run tests.

    Args:
        app (App): Running toolboxv2 app instance, attached to the test
            class so individual tests can use it.

    Raises:
        AssertionError: If any ModManager test errors or fails.
    """
    print("Running ModManager Tests...")
    # We pass the app instance to the test class so it can be used if needed.
    TestModManager.app = app
    suite = unittest.TestSuite()
    # Bug fix: unittest.makeSuite() was deprecated since 3.2 and removed in
    # Python 3.13; TestLoader.loadTestsFromTestCase() is the supported API.
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestModManager))
    runner = unittest.TextTestRunner()
    result = runner.run(suite)
    if not result.wasSuccessful():
        # Raise an exception to signal failure to the toolboxv2 test runner
        raise AssertionError(f"ModManager tests failed: {result.errors} {result.failures}")
    print("ModManager tests passed successfully.")
    return True

UserAccountManager

get_current_user_from_request_api_wrapper(app, request) async

API callable version of get_current_user_from_request for tbjs/admin panel

Source code in toolboxv2/mods/CloudM/UserAccountManager.py
150
151
152
153
154
155
156
157
158
159
160
161
@export(mod_name=Name, api=True, version=version, request_as_kwarg=True, row=False)  # row=False to return JSON
async def get_current_user_from_request_api_wrapper(app: App, request: RequestData):
    """ API callable version of get_current_user_from_request for tbjs/admin panel """
    user = await get_current_user_from_request(app, request)
    if not user:
        # Return error that tbjs can handle
        return Result.default_user_error(info="User not authenticated or found.", data=None, exec_code=401)
    # Expose only the public subset of the user record.
    full_record = asdict(user)
    public_fields = ('name', 'pub_key', 'email', 'creation_time',
                     'is_persona', 'level', 'log_level', 'settings')
    return Result.ok(data={field: full_record.get(field, None) for field in public_fields})

email_services

send_email_verification_email(app, user_email, username, verification_url)

Sends an email verification link to the user.

Source code in toolboxv2/mods/CloudM/email_services.py
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
@s_export
def send_email_verification_email(app: App, user_email: str, username: str, verification_url: str):
    """Sends an email verification link to the user.

    Builds the HTML body and delegates delivery to EmailSender.
    """
    # Keep the markup exactly as the shared email template CSS expects.
    body_html = f"""
        <h2>Hi {username},</h2>
        <p>Thanks for signing up for {APP_NAME}! To complete your registration, please verify your email address by clicking the button below.</p>
        <a href="{verification_url}" class="button">Verify Email Address</a>
        <p>If you didn't create an account with {APP_NAME}, you can safely ignore this email.</p>
        <p>If the button doesn't work, copy and paste this link into your browser:<br><span class="link-in-text">{verification_url}</span></p>
        <p>Sincerely,<br>The {APP_NAME} Team</p>
    """
    mailer = EmailSender(app)
    return mailer.send_html_email(
        user_email,
        f"Verify Your Email for {APP_NAME}",
        body_html,
        f"Almost there, {username}! Just one more step to activate your account.",
    )

Sends a magic link email for login.

Source code in toolboxv2/mods/CloudM/email_services.py
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
@s_export
def send_magic_link_email(app: App, user_email: str, magic_link_url: str, username: str = None):
    """Sends a magic link email for login.

    Args:
        app (App): Toolbox app instance used to construct the EmailSender.
        user_email (str): Recipient address.
        magic_link_url (str): Login URL; when it carries a ``?key=`` query
            parameter, the raw invitation key is also shown in the body.
        username (str, optional): Display name for the greeting.
    """
    sender = EmailSender(app)
    greeting_name = f", {username}" if username else ""
    subject = f"Your Magic Login Link for {APP_NAME}"
    preview_text = "Securely access your account with this one-time link."

    # Bug fix: the previous unconditional `split('?key=')[1]` raised
    # IndexError for URLs without a `?key=` parameter, which made the whole
    # email fail to send. Only render the key paragraph when it is present.
    invitation_key_line = ""
    if '?key=' in magic_link_url:
        raw_key = magic_link_url.split('?key=')[1].split('&name=')[0].replace('%23', '#')
        invitation_key_line = f"<p> Invitation key: {raw_key}</p>"

    content_html = f"""
        <h2>Hello{greeting_name}!</h2>
        <p>You requested a magic link to sign in to your {APP_NAME} account.</p>
        <p>Click the button below to log in. This link is temporary and will expire shortly.</p>
        <a href="{magic_link_url}" class="button">Log In Securely</a>
        {invitation_key_line}
        <p>If you did not request this link, please ignore this email. Your account is safe.</p>
        <p>If the button doesn't work, copy and paste this link into your browser:<br><span class="link-in-text">{magic_link_url}</span></p>
        <p>Thanks,<br>The {APP_NAME} Team</p>
    """
    return sender.send_html_email(user_email, subject, content_html, preview_text)
send_signup_invitation_email(app, invited_user_email, invited_username, inviter_username=None)

Generates an invitation link and sends it via email.

Source code in toolboxv2/mods/CloudM/email_services.py
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
@s_export
def send_signup_invitation_email(app: App, invited_user_email: str, invited_username: str,
                                 inviter_username: str = None):
    """Generates an invitation link and sends it via email."""
    # Invitation code: salted one-way hash prefix plus a short random suffix.
    # Relies on TB_R_KEY from the environment (falls back to a default pepper).
    pepper = os.getenv("TB_R_KEY", "pepper123")
    invitation_code = (Code.one_way_hash(invited_username, "00#", pepper)[:12]
                       + str(uuid.uuid4())[:6])

    # Frontend signup page, with all identifying fields URL-encoded.
    signup_link_url = (
        f"{APP_BASE_URL}/web/assets/signup.html"
        f"?invitation={quote(invitation_code)}"
        f"&email={quote(invited_user_email)}"
        f"&username={quote(invited_username)}"
    )

    if inviter_username:
        inviter_line = f"<p>{inviter_username} has invited you to join.</p>"
    else:
        inviter_line = "<p>You've been invited to join.</p>"

    content_html = f"""
        <h2>Hello {invited_username},</h2>
        {inviter_line}
        <p>{APP_NAME} is an exciting platform, and we'd love for you to be a part of it!</p>
        <p>Click the button below to accept the invitation and create your account:</p>
        <a href="{signup_link_url}" class="button">Accept Invitation & Sign Up</a>
        <p>This invitation is unique to you : {invitation_code}</p>
        <p>If the button doesn't work, copy and paste this link into your browser:<br><span class="link-in-text">{signup_link_url}</span></p>
        <p>We look forward to seeing you there!<br>The {APP_NAME} Team</p>
    """
    return EmailSender(app).send_html_email(
        invited_user_email,
        f"You're Invited to Join {APP_NAME}!",
        content_html,
        f"{inviter_username or 'A friend'} has invited you to {APP_NAME}!",
    )
send_waiting_list_confirmation_email(app, user_email)

Sends a confirmation email for joining the waiting list.

Source code in toolboxv2/mods/CloudM/email_services.py
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
@s_export
def send_waiting_list_confirmation_email(app: App, user_email: str):
    """Sends a confirmation email for joining the waiting list."""
    mail_subject = f"You're on the Waiting List for {APP_NAME}!"
    mail_preview = "Thanks for your interest! We'll keep you updated."

    html_body = f"""
        <h2>You're In!</h2>
        <p>Thank you for joining the waiting list for {APP_NAME}. We're working hard to get things ready and appreciate your interest.</p>
        <p>We'll notify you as soon as we have updates or when access becomes available.</p>
        <p>In the meantime, you can follow our progress or learn more at <a href="{APP_BASE_URL}" class="link-in-text">{APP_BASE_URL}</a>.</p>
        <p>Stay tuned,<br>The {APP_NAME} Team</p>
    """
    # Waiting-list mail is marketing-adjacent, so the unsubscribe link is shown.
    return EmailSender(app).send_html_email(
        user_email, mail_subject, html_body, mail_preview,
        recipient_email_for_unsubscribe=user_email, show_unsubscribe_link=True)
send_welcome_email(app, user_email, username, welcome_action_url=None)

Sends a welcome email to a new user.

Source code in toolboxv2/mods/CloudM/email_services.py
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
@s_export  # Changed to native, api=False as it's a backend function
def send_welcome_email(app: App, user_email: str, username: str, welcome_action_url: str = None):
    """Sends a welcome email to a new user."""
    # Fall back to the dashboard when no explicit landing URL is supplied.
    target_url = welcome_action_url if welcome_action_url else f"{APP_BASE_URL}/dashboard"

    html_body = f"""
        <h2>Welcome Aboard, {username}!</h2>
        <p>Thank you for signing up for {APP_NAME}. We're excited to have you join our community!</p>
        <p>Here are a few things you might want to do next:</p>
        <ul>
            <li>Explore your new account features.</li>
            <li>Customize your profile.</li>
        </ul>
        <p>Click the button below to get started:</p>
        <a href="{target_url}" class="button">Go to Your Dashboard</a>
        <p>If the button doesn't work, copy and paste this link into your browser:<br><span class="link-in-text">{target_url}</span></p>
        <p>Best regards,<br>The {APP_NAME} Team</p>
    """
    return EmailSender(app).send_html_email(
        user_email,
        f"Welcome to {APP_NAME}, {username}!",
        html_body,
        f"We're thrilled to have you, {username}!",
        recipient_email_for_unsubscribe=user_email,
        show_unsubscribe_link=True,
    )

mini

check_multiple_processes(pids)

Checks the status of multiple processes in a single system call. Returns a dictionary mapping PIDs to their status (GREEN_CIRCLE, RED_CIRCLE, or YELLOW_CIRCLE).

Source code in toolboxv2/mods/CloudM/mini.py
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
def check_multiple_processes(pids: list[int]) -> dict[int, str]:
    """
    Checks the status of multiple processes in a single system call.
    Returns a dictionary mapping PIDs to their status (GREEN_CIRCLE, RED_CIRCLE, or YELLOW_CIRCLE).
    """
    if not pids:
        return {}

    if os.name == 'nt':  # Windows
        running = _windows_running_pids(pids)
    else:  # Unix/Linux/Mac
        running = _unix_running_pids(pids)

    if running is None:
        # Could not query the OS at all -> status unknown for every PID.
        return {pid: YELLOW_CIRCLE for pid in pids}
    return {pid: (GREEN_CIRCLE if pid in running else RED_CIRCLE) for pid in pids}


def _windows_running_pids(pids):
    """Return the subset of `pids` visible in `tasklist`, or None on failure."""
    # cp850 matches the default Windows console code page; fall back to utf-8
    # if decoding fails (this replaces the previously duplicated retry code).
    for encoding in ('cp850', 'utf-8'):
        try:
            result = subprocess.run(
                'tasklist',
                capture_output=True,
                text=True,
                shell=True,
                encoding=encoding,
            )
        except subprocess.SubprocessError as e:
            print(f"SubprocessError: {e}")  # For debugging
            return None
        except UnicodeDecodeError as e:
            print(f"UnicodeDecodeError: {e}")  # For debugging
            continue  # retry with the next encoding

        wanted = {str(pid) for pid in pids}
        running = set()
        # Bug fix: compare whole whitespace-separated tokens instead of the
        # old substring test (`str(pid) in line`), which reported PID 12 as
        # running whenever any line merely contained "12" (e.g. PID 8123 or
        # a memory column).
        for line in result.stdout.split('\n'):
            for token in line.split():
                if token in wanted:
                    running.add(int(token))
        return running
    return None


def _unix_running_pids(pids):
    """Return the subset of `pids` reported running by `ps`, or None on failure."""
    try:
        pids_str = ','.join(str(pid) for pid in pids)
        result = subprocess.run(
            f'ps -p {pids_str} -o pid=',
            capture_output=True,
            text=True,
            shell=True,
            encoding='utf-8',
        )
        return {int(pid) for pid in result.stdout.strip().split()}
    except (subprocess.SubprocessError, ValueError) as e:
        print(f"SubprocessError: {e}")  # For debugging
        return None
get_service_pids(info_dir)

Extracts service names and PIDs from pid files.

Source code in toolboxv2/mods/CloudM/mini.py
14
15
16
17
18
19
20
21
22
23
24
25
26
27
def get_service_pids(info_dir):
    """Extracts service names and PIDs from pid files.

    Scans `info_dir` for files named ``<type>-<name>.pid`` and returns a dict
    mapping ``"<name> - <type>"`` to the integer PID stored in each file.

    Args:
        info_dir: Directory containing the ``*.pid`` files.

    Returns:
        dict[str, int]: Formatted service key -> PID.
    """
    services = {}
    for entry in os.listdir(info_dir):
        # Bug fix: fullmatch anchors the pattern; the old unanchored
        # re.match() also accepted names that merely start like a pid file
        # (e.g. "web-api.pid.bak"). Also collapses the previous redundant
        # two-pass (filter then re-match) into a single regex pass.
        match = re.fullmatch(r'(.+)-(.+)\.pid', entry)
        if not match:
            continue
        services_type, service_name = match.groups()
        # Read the PID from the file
        with open(os.path.join(info_dir, entry)) as file:
            content = file.read().strip()
        try:
            # Store the PID using a formatted key
            services[f"{service_name} - {services_type}"] = int(content)
        except ValueError:
            # Skip corrupt pid files instead of aborting the whole scan.
            continue
    return services
get_service_status(dir)

Displays the status of all services.

Source code in toolboxv2/mods/CloudM/mini.py
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
def get_service_status(dir: str) -> str:
    """Displays the status of all services.

    Scans `dir` for pid files via get_service_pids(), but at most once every
    30 seconds — results are cached in the module-level single-element lists
    `services_data_sto` / `services_data_sto_last_update_time`. Returns one
    line per service with a status circle, or "No services found".
    """
    # Refresh the cached service map only when the last scan is >30s old.
    if time.time()-services_data_sto_last_update_time[0] > 30:
        services = get_service_pids(dir)
        services_data_sto[0] = services
        services_data_sto_last_update_time[0] = time.time()
    else:
        services = services_data_sto[0]
    if not services:
        return "No services found"

    # Get status for all PIDs in a single call
    pid_statuses = check_multiple_processes(list(services.values()))

    # Build the status string
    res_s = "Service(s):" + ("\n" if len(services) > 1 else ' ')
    for service_name, pid in services.items():
        # YELLOW_CIRCLE = unknown: pid missing from the status lookup.
        status = pid_statuses.get(pid, YELLOW_CIRCLE)
        res_s += f"{status} {service_name} (PID: {pid})\n"
    # Side effect: cache a display copy for other consumers; the return value
    # only drops the trailing newline.
    services_data_display[0] = res_s.strip()
    return res_s.rstrip()

module

hash_password(password)

Hash a password for storing.

Source code in toolboxv2/mods/CloudM/module.py
109
110
111
112
113
114
115
def hash_password(password):
    """Hash a password for storing.

    Returns a 192-character ASCII string: 64 hex chars of random SHA-256
    salt followed by 128 hex chars of PBKDF2-HMAC-SHA512 (100k iterations).
    """
    # Fresh random salt per call, normalized to 64 hex characters.
    random_salt = hashlib.sha256(os.urandom(60)).hexdigest().encode('ascii')
    derived = binascii.hexlify(
        hashlib.pbkdf2_hmac('sha512', password.encode('utf-8'), random_salt, 100000)
    )
    return (random_salt + derived).decode('ascii')
verify_password(stored_password, provided_password)

Verify a stored password against one provided by user

Source code in toolboxv2/mods/CloudM/module.py
119
120
121
122
123
124
125
126
def verify_password(stored_password, provided_password):
    """Verify a stored password against one provided by user

    Args:
        stored_password: 64 hex chars of salt followed by the hex PBKDF2
            digest, as produced by hash_password().
        provided_password: Plaintext candidate password.

    Returns:
        bool: True if the candidate matches the stored hash.
    """
    import hmac  # local import: constant-time comparison helper

    salt = stored_password[:64]
    expected_hash = stored_password[64:]
    candidate = binascii.hexlify(
        hashlib.pbkdf2_hmac('sha512', provided_password.encode('utf-8'),
                            salt.encode('ascii'), 100000)
    ).decode('ascii')
    # Security fix: hmac.compare_digest avoids the timing side channel of a
    # plain `==` comparison on secret material.
    return hmac.compare_digest(candidate, expected_hash)

CodeVerification

VerificationSystem

Source code in toolboxv2/mods/CodeVerification.py
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
class VerificationSystem:
    """Scoped verification-code manager persisted through the DB tools module.

    Templates (ConfigTemplate) define how codes behave (one-time, timed,
    bounded uses). Templates and generated codes are stored as JSON under
    per-scope keys: "verification_templates_<scope>" and
    "verification_codes_<scope>".
    """

    def __init__(self, tools_db, scope="main"):
        """
        Initialize VerificationSystem with DB Tools integration

        Args:
            tools_db (Tools): Database tools from toolboxv2.mods.DB
            scope (str, optional): Scope for templates and codes. Defaults to "main".
        """
        self.tools_db = tools_db
        self.scope = scope
        # template name -> template id lookup, rebuilt from the DB below.
        self.tidmp = {}
        self._ensure_scope_templates()

    def get(self):
        # Fluent accessor returning the instance itself.
        return self

    def reset_scope_templates(self):
        """Reset the template store for this scope to an empty dictionary.

        Note: unlike _ensure_scope_templates, this overwrites any existing
        templates unconditionally.
        """
        templates_key = f"verification_templates_{self.scope}"

        self.tools_db.set(templates_key, json.dumps({}))

    def _ensure_scope_templates(self):
        """
        Ensure a templates dictionary exists for the current scope in the database
        """
        templates_key = f"verification_templates_{self.scope}"

        # Check if templates exist for this scope
        templates_exist = self.tools_db.if_exist(templates_key)

        # NOTE(review): `and` means the store is only initialized when the
        # existence check errors *and* carries no data — confirm `or` was not
        # intended for the Result semantics of if_exist().
        if templates_exist.is_error() and not templates_exist.is_data():
            # Initialize empty templates dictionary if not exists
            self.tools_db.set(templates_key, json.dumps({}))
        else:
            allt = self.get_all_templates()

            # Rebuild the name -> id map from the persisted templates.
            for k, v in allt.items():
                if 'name' not in v:
                    continue
                self.tidmp[v['name']] = k

    def add_config_template(self, template: ConfigTemplate) -> str:
        """
        Add a new configuration template to the database

        Args:
            template (ConfigTemplate): The configuration template

        Returns:
            str: Unique identifier of the template

        Raises:
            ValueError: If the template store could not be persisted.
        """
        # Ensure template has the current scope
        template.scope = self.scope

        # Generate a unique template ID
        template_id = secrets.token_urlsafe(8)

        # Get existing templates for this scope
        templates = self.get_all_templates()

        # Add new template and keep the name -> id map in sync.
        self.tidmp[template.name] = template_id
        templates[template_id] = asdict(template)

        # Save updated templates back to database
        templates_key = f"verification_templates_{self.scope}"
        save_result = self.tools_db.set(templates_key, json.dumps(templates))

        if save_result.is_error():
            raise ValueError("Could not save template")

        return template_id

    def get_all_templates(self):
        """Load and decode the per-scope template dict.

        Falls back to an empty dict when the stored data is missing or
        corrupt (JSON decode failure is logged, not raised).
        """
        templates_key = f"verification_templates_{self.scope}"
        templates_result = self.tools_db.get(templates_key)

        if not templates_result.is_error() and templates_result.is_data():
            try:
                templates_result.result.data = json.loads(templates_result.get())
            except Exception as e:
                templates_result.print()
                print(f"Error loading template data, corrupted: {str(e)}")
                templates_result.result.data = {}
        else:
            templates_result.result.data = {}
        # Unwrap the Result object so callers always receive a plain dict.
        if not isinstance(templates_result, dict):
            templates_result = templates_result.result.data
        return templates_result

    def generate_code(self, template_id: str) -> str:
        """
        Generate a code based on the configuration template

        Args:
            template_id (str): ID of the configuration template
                (a template *name* is also accepted and resolved via the
                internal name map)

        Returns:
            str: Generated verification code

        Raises:
            ValueError: If the template is unknown or the code store could
                not be persisted.
        """
        # Get templates for this scope
        templates = self.get_all_templates()
        print(templates, self.tidmp, template_id)
        # Resolve a template name to its id if the raw id is unknown.
        if template_id not in templates:
            template_id = self.tidmp.get(template_id, template_id)
        if template_id not in templates:
            raise ValueError("Invalid configuration template")

        template_dict = templates[template_id]
        # Instantiating validates the stored template dict's fields.
        ConfigTemplate(**template_dict)

        # Generate a random code with max 16 characters
        code = secrets.token_urlsafe(10)[:16]

        # Prepare code information
        code_info = {
            'template_id': template_id,
            'created_at': time.time(),
            'uses_count': 0,
            'scope': self.scope
        }

        # Store code information in database
        codes_key = f"verification_codes_{self.scope}"
        existing_codes_result = self.tools_db.get(codes_key)

        existing_codes = {}
        if not existing_codes_result.is_error() and existing_codes_result.is_data():
            d = existing_codes_result.get()
            # Some backends wrap the payload in a single-element list.
            if isinstance(d, list):
                d = d[0]
            existing_codes = json.loads(d)

        existing_codes[code] = code_info

        save_result = self.tools_db.set(codes_key, json.dumps(existing_codes))

        if save_result.is_error():
            raise ValueError("Could not save generated code")

        return code

    def validate_code(self, code: str) -> dict[str, Any] | None:
        """
        Validate a code and return template information

        Consumes one use of the code; one-time and exhausted/expired codes
        are removed from the store.

        Args:
            code (str): Code to validate

        Returns:
            Optional[Dict[str, Any]]: Template information for valid code, else None
        """
        # Get codes for this scope
        codes_key = f"verification_codes_{self.scope}"
        codes_result = self.tools_db.get(codes_key)

        if codes_result.is_error() or not codes_result.is_data():
            return None

        d = codes_result.get()
        # Some backends wrap the payload in a single-element list.
        if isinstance(d, list):
            d = d[0]
        existing_codes = json.loads(d)

        if code not in existing_codes:
            return None

        code_info = existing_codes[code]

        # Check if code is from the same scope
        if code_info.get('scope') != self.scope:
            return None

        # Get templates for this scope
        templates = self.get_all_templates()
        template_id = code_info['template_id']

        # Bug fix: previously returned the whole templates dict here, which a
        # truthiness check by the caller would treat as a *valid* code.
        if template_id not in templates:
            return None

        template_dict = templates[template_id]
        template = ConfigTemplate(**template_dict)

        # Check usage count
        if code_info['uses_count'] >= template.max_uses:
            del existing_codes[code]
            self.tools_db.set(codes_key, json.dumps(existing_codes))
            return None

        # Check time validity for timed codes
        if template.usage_type == 'timed':
            current_time = time.time()
            if template.valid_duration and (current_time - code_info['created_at']) > template.valid_duration:
                del existing_codes[code]
                self.tools_db.set(codes_key, json.dumps(existing_codes))
                return None

        # Update uses count
        existing_codes[code]['uses_count'] += 1
        uses_count = existing_codes[code].get('uses_count', 1)
        # Remove code if it's a one-time use
        if template.usage_type == 'one_time':
            del existing_codes[code]

        # Save updated codes
        self.tools_db.set(codes_key, json.dumps(existing_codes))

        return {
            'template_name': template.name,
            'usage_type': template.usage_type,
            'uses_count': uses_count
        }
__init__(tools_db, scope='main')

Initialize VerificationSystem with DB Tools integration

Parameters:

Name Type Description Default
tools_db Tools

Database tools from toolboxv2.mods.DB

required
scope str

Scope for templates and codes. Defaults to "main".

'main'
Source code in toolboxv2/mods/CodeVerification.py
27
28
29
30
31
32
33
34
35
36
37
38
def __init__(self, tools_db, scope="main"):
    """
    Initialize VerificationSystem with DB Tools integration

    Args:
        tools_db (Tools): Database tools from toolboxv2.mods.DB
        scope (str, optional): Scope for templates and codes. Defaults to "main".
    """
    self.tools_db = tools_db
    self.scope = scope
    # template name -> template id lookup, populated from the DB on init.
    self.tidmp = {}
    self._ensure_scope_templates()
add_config_template(template)

Add a new configuration template to the database

Parameters:

Name Type Description Default
template ConfigTemplate

The configuration template

required

Returns:

Name Type Description
str str

Unique identifier of the template

Source code in toolboxv2/mods/CodeVerification.py
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
def add_config_template(self, template: ConfigTemplate) -> str:
    """
    Add a new configuration template to the database

    Args:
        template (ConfigTemplate): The configuration template

    Returns:
        str: Unique identifier of the template
    """
    # Templates are always stored under this instance's scope.
    template.scope = self.scope

    # Random, URL-safe identifier for the new template.
    new_id = secrets.token_urlsafe(8)

    # Merge the new template into whatever is already persisted and keep
    # the name -> id lookup in sync.
    all_templates = self.get_all_templates()
    self.tidmp[template.name] = new_id
    all_templates[new_id] = asdict(template)

    persisted = self.tools_db.set(
        f"verification_templates_{self.scope}", json.dumps(all_templates))
    if persisted.is_error():
        raise ValueError("Could not save template")

    return new_id
generate_code(template_id)

Generate a code based on the configuration template

Parameters:

Name Type Description Default
template_id str

ID of the configuration template

required

Returns:

Name Type Description
str str

Generated verification code

Source code in toolboxv2/mods/CodeVerification.py
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
def generate_code(self, template_id: str) -> str:
    """
    Generate a code based on the configuration template

    Args:
        template_id (str): ID of the configuration template

    Returns:
        str: Generated verification code
    """
    templates = self.get_all_templates()
    print(templates, self.tidmp, template_id)
    # Callers may pass a template *name*; resolve it through the name map.
    if template_id not in templates:
        template_id = self.tidmp.get(template_id, template_id)
    if template_id not in templates:
        raise ValueError("Invalid configuration template")

    # Instantiating validates the stored template dict's fields.
    ConfigTemplate(**templates[template_id])

    # Random, URL-safe code capped at 16 characters.
    code = secrets.token_urlsafe(10)[:16]

    record = {
        'template_id': template_id,
        'created_at': time.time(),
        'uses_count': 0,
        'scope': self.scope
    }

    # Merge the new code into the per-scope code store.
    codes_key = f"verification_codes_{self.scope}"
    stored = self.tools_db.get(codes_key)

    known_codes = {}
    if not stored.is_error() and stored.is_data():
        payload = stored.get()
        # Some backends wrap the payload in a single-element list.
        if isinstance(payload, list):
            payload = payload[0]
        known_codes = json.loads(payload)

    known_codes[code] = record

    if self.tools_db.set(codes_key, json.dumps(known_codes)).is_error():
        raise ValueError("Could not save generated code")

    return code
reset_scope_templates()

Ensure a templates dictionary exists for the current scope in the database

Source code in toolboxv2/mods/CodeVerification.py
43
44
45
46
47
48
49
def reset_scope_templates(self):
    """
    Reset the template store for the current scope to an empty dictionary.

    Note: this overwrites any existing templates unconditionally (unlike
    _ensure_scope_templates, which only creates the store when missing).
    """
    templates_key = f"verification_templates_{self.scope}"

    self.tools_db.set(templates_key, json.dumps({}))
validate_code(code)

Validate a code and return template information

Parameters:

Name Type Description Default
code str

Code to validate

required

Returns:

Type Description
dict[str, Any] | None

Optional[Dict[str, Any]]: Template information for valid code, else None

Source code in toolboxv2/mods/CodeVerification.py
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
def validate_code(self, code: str) -> dict[str, Any] | None:
    """
    Validate a code and return template information

    Consumes one use of the code; one-time and exhausted/expired codes are
    removed from the store.

    Args:
        code (str): Code to validate

    Returns:
        Optional[Dict[str, Any]]: Template information for valid code, else None
    """
    # Get codes for this scope
    codes_key = f"verification_codes_{self.scope}"
    codes_result = self.tools_db.get(codes_key)

    if codes_result.is_error() or not codes_result.is_data():
        return None

    d = codes_result.get()
    # Some backends wrap the payload in a single-element list.
    if isinstance(d, list):
        d = d[0]
    existing_codes = json.loads(d)

    if code not in existing_codes:
        return None

    code_info = existing_codes[code]

    # Check if code is from the same scope
    if code_info.get('scope') != self.scope:
        return None

    # Get templates for this scope
    templates = self.get_all_templates()
    template_id = code_info['template_id']

    # Bug fix: previously returned the whole templates dict here, which a
    # truthiness check by the caller would treat as a *valid* code.
    if template_id not in templates:
        return None

    template_dict = templates[template_id]
    template = ConfigTemplate(**template_dict)

    # Check usage count
    if code_info['uses_count'] >= template.max_uses:
        del existing_codes[code]
        self.tools_db.set(codes_key, json.dumps(existing_codes))
        return None

    # Check time validity for timed codes
    if template.usage_type == 'timed':
        current_time = time.time()
        if template.valid_duration and (current_time - code_info['created_at']) > template.valid_duration:
            del existing_codes[code]
            self.tools_db.set(codes_key, json.dumps(existing_codes))
            return None

    # Update uses count
    existing_codes[code]['uses_count'] += 1
    uses_count = existing_codes[code].get('uses_count', 1)
    # Remove code if it's a one-time use
    if template.usage_type == 'one_time':
        del existing_codes[code]

    # Save updated codes
    self.tools_db.set(codes_key, json.dumps(existing_codes))

    return {
        'template_name': template.name,
        'usage_type': template.usage_type,
        'uses_count': uses_count
    }

DB

blob_instance

BlobDB

A persistent, encrypted dictionary-like database that uses the BlobStorage system as its backend, making it networked and fault-tolerant.

Source code in toolboxv2/mods/DB/blob_instance.py
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
class BlobDB:
    """
    A persistent, encrypted dictionary-like database that uses the BlobStorage
    system as its backend, making it networked and fault-tolerant.
    """
    auth_type = AuthenticationTypes.location

    def __init__(self):
        # In-memory working copy of the database; flushed to blob storage in exit().
        self.data: dict = {}
        # key/db_path/storage_client stay None until initialize() is called;
        # exit() uses that to detect an uninitialized instance.
        self.key: str | None = None
        self.db_path: str | None = None
        self.storage_client: BlobStorage | None = None


    def initialize(self, db_path: str, key: str, storage_client: BlobStorage) -> Result:
        """
        Initializes the database from a location within the blob storage.

        Args:
            db_path (str): The virtual path within the blob storage,
                           e.g., "my_database_blob/database.json".
            key (str): The encryption key for the database content.
            storage_client (BlobStorage): An initialized BlobStorage client instance.

        Returns:
            Result: An OK result if successful.
        """
        self.db_path = db_path
        self.key = key
        self.storage_client = storage_client

        print(f"Initializing BlobDB from blob path: '{self.db_path}'...")

        try:
            # Use BlobFile for reading. It handles caching, networking, and decryption.
            db_file = BlobFile(self.db_path, mode='r', storage=self.storage_client, key=self.key)
            if not db_file.exists():
                print(f"Database file not found at '{self.db_path}'. Starting with an empty database.")
                db_file.create()
                self.data = {}
            else:
                with db_file as f:
                    # read_json safely loads the content.
                    self.data = f.read_json()
                    if not self.data:  # Handle case where file exists but is empty
                        self.data = {}
                print("Successfully initialized database.")

        except Exception as e:
            # Best-effort recovery: any storage or decryption failure falls back
            # to an empty in-memory database instead of propagating the error.
            print(f"Warning: Could not initialize BlobDB from '{self.db_path}'. Error: {e}. Starting fresh.")
            self.data = {}

        return Result.ok().set_origin("Blob Dict DB")

    def exit(self) -> Result:
        """
        Saves the current state of the database back to the blob storage.
        """
        # Refuse to save unless initialize() has provided key, path and client.
        print("BLOB DB on exit ", not all([self.key, self.db_path, self.storage_client]))
        if not all([self.key, self.db_path, self.storage_client]):
            return Result.default_internal_error(
                info="Database not initialized. Cannot exit."
            ).set_origin("Blob Dict DB")

        print(f"Saving database to blob path: '{self.db_path}'...")
        try:
            # Use BlobFile for writing. It handles encryption, networking, and updates.
            with BlobFile(self.db_path, mode='w', storage=self.storage_client, key=self.key) as f:
                f.write_json(self.data)

            print("Success: Database saved to blob storage.")
            return Result.ok().set_origin("Blob Dict DB")

        except Exception as e:
            # Report the failure through the Result instead of raising.
            return Result.custom_error(
                data=e,
                info=f"Error saving database to blob storage: {e}"
            ).set_origin("Blob Dict DB")

    # --- Data Manipulation Methods (Unchanged Logic) ---
    # These methods operate on the in-memory `self.data` dictionary and do not
    # need to be changed, as the loading/saving is handled by initialize/exit.

    def get(self, key: str) -> Result:
        """Look up values. Special keys: 'all' returns every (key, value)
        pair, 'all-k' returns all keys; any other key is treated as a prefix
        pattern (a trailing '*' is stripped)."""
        # NOTE: an empty database short-circuits to an error, even for 'all'/'all-k'.
        if not self.data:
            return Result.default_internal_error(info=f"No data found for key '{key}' (database is empty).").set_origin(
                "Blob Dict DB")

        data = []
        if key == 'all':
            data_info = "Returning all data available"
            data = list(self.data.items())
        elif key == "all-k":
            data_info = "Returning all keys"
            data = list(self.data.keys())
        else:
            data_info = f"Returning values for keys starting with '{key.replace('*', '')}'"
            data = [self.data[k] for k in self.scan_iter(key)]

        if not data:
            return Result.default_internal_error(info=f"No data found for key '{key}'").set_origin("Blob Dict DB")

        return Result.ok(data=data, data_info=data_info).set_origin("Blob Dict DB")

    def set(self, key: str, value) -> Result:
        """Store `value` under `key`; the key must be a non-empty string."""
        if not isinstance(key, str) or not key:
            return Result.default_user_error(info="Key must be a non-empty string.").set_origin("Blob Dict DB")

        self.data[key] = value
        return Result.ok().set_origin("Blob Dict DB")

    def scan_iter(self, search: str = ''):
        """Return all keys starting with `search` (any '*' characters are ignored)."""
        if not self.data:
            return []
        prefix = search.replace('*', '')
        return [key for key in self.data if key.startswith(prefix)]

    def append_on_set(self, key: str, value: list) -> Result:
        """Extend the list stored at `key` with the items of `value`,
        skipping items already present (set-like semantics)."""
        if key not in self.data:
            self.data[key] = []

        if not isinstance(self.data[key], list):
            return Result.default_user_error(info=f"Existing value for key '{key}' is not a list.").set_origin(
                "Blob Dict DB")

        # Use a set for efficient checking to avoid duplicates
        # NOTE(review): assumes the stored items are hashable — set() raises
        # TypeError otherwise; confirm callers only store hashable values.
        existing_set = set(self.data[key])
        new_items = [item for item in value if item not in existing_set]
        self.data[key].extend(new_items)
        return Result.ok().set_origin("Blob Dict DB")

    def if_exist(self, key: str) -> int:
        """Return the match count for a trailing-'*' pattern, else 1/0 membership."""
        if key.endswith('*'):
            return len(self.scan_iter(key))
        return 1 if key in self.data else 0

    def delete(self, key: str, matching: bool = False) -> Result:
        """Delete a single key, or every key with prefix `key` when `matching` is True."""
        keys_to_delete = []
        if matching:
            keys_to_delete = self.scan_iter(key)
        elif key in self.data:
            keys_to_delete.append(key)

        if not keys_to_delete:
            return Result.default_internal_error(info=f"No keys found to delete for pattern '{key}'").set_origin(
                "Blob Dict DB")

        deleted_items = {k: self.data.pop(k) for k in keys_to_delete}
        return Result.ok(
            data=list(deleted_items.items()),
            data_info=f"Successfully removed {len(deleted_items)} item(s)."
        ).set_origin("Blob Dict DB")
exit()

Saves the current state of the database back to the blob storage.

Source code in toolboxv2/mods/DB/blob_instance.py
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
def exit(self) -> Result:
    """
    Saves the current state of the database back to the blob storage.
    """
    # Refuse to save unless initialize() has provided key, path and client.
    print("BLOB DB on exit ", not all([self.key, self.db_path, self.storage_client]))
    if not all([self.key, self.db_path, self.storage_client]):
        return Result.default_internal_error(
            info="Database not initialized. Cannot exit."
        ).set_origin("Blob Dict DB")

    print(f"Saving database to blob path: '{self.db_path}'...")
    try:
        # Use BlobFile for writing. It handles encryption, networking, and updates.
        with BlobFile(self.db_path, mode='w', storage=self.storage_client, key=self.key) as f:
            f.write_json(self.data)

        print("Success: Database saved to blob storage.")
        return Result.ok().set_origin("Blob Dict DB")

    except Exception as e:
        # Report the failure through the Result instead of raising.
        return Result.custom_error(
            data=e,
            info=f"Error saving database to blob storage: {e}"
        ).set_origin("Blob Dict DB")
initialize(db_path, key, storage_client)

Initializes the database from a location within the blob storage.

Parameters:

Name Type Description Default
db_path str

The virtual path within the blob storage, e.g., "my_database_blob/database.json".

required
key str

The encryption key for the database content.

required
storage_client BlobStorage

An initialized BlobStorage client instance.

required

Returns:

Name Type Description
Result Result

An OK result if successful.

Source code in toolboxv2/mods/DB/blob_instance.py
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
def initialize(self, db_path: str, key: str, storage_client: BlobStorage) -> Result:
    """
    Initializes the database from a location within the blob storage.

    Args:
        db_path (str): The virtual path within the blob storage,
                       e.g., "my_database_blob/database.json".
        key (str): The encryption key for the database content.
        storage_client (BlobStorage): An initialized BlobStorage client instance.

    Returns:
        Result: An OK result if successful.
    """
    self.db_path = db_path
    self.key = key
    self.storage_client = storage_client

    print(f"Initializing BlobDB from blob path: '{self.db_path}'...")

    try:
        # Use BlobFile for reading. It handles caching, networking, and decryption.
        db_file = BlobFile(self.db_path, mode='r', storage=self.storage_client, key=self.key)
        if not db_file.exists():
            print(f"Database file not found at '{self.db_path}'. Starting with an empty database.")
            db_file.create()
            self.data = {}
        else:
            with db_file as f:
                # read_json safely loads the content.
                self.data = f.read_json()
                if not self.data:  # Handle case where file exists but is empty
                    self.data = {}
            print("Successfully initialized database.")

    except Exception as e:
        # Best-effort recovery: any storage or decryption failure falls back
        # to an empty in-memory database instead of propagating the error.
        print(f"Warning: Could not initialize BlobDB from '{self.db_path}'. Error: {e}. Starting fresh.")
        self.data = {}

    return Result.ok().set_origin("Blob Dict DB")

local_instance

load_from_json(filename)

Lädt Daten aus einer JSON-Datei.

:param filename: Der Dateiname oder Pfad der zu ladenden Datei. :return: Die geladenen Daten.

Source code in toolboxv2/mods/DB/local_instance.py
137
138
139
140
141
142
143
144
145
146
147
148
def load_from_json(filename):
    """
    Load data from a JSON file.

    :param filename: Name or path of the file to load.
    :return: The parsed data, or ``{'data': ''}`` if the file does not exist.
    """
    if os.path.exists(filename):
        with open(filename) as fh:
            return json.load(fh)
    # Missing file: fall back to the module's empty-database placeholder.
    return {'data': ''}
save_to_json(data, filename)

Speichert die übergebenen Daten in einer JSON-Datei.

:param data: Die zu speichernden Daten. :param filename: Der Dateiname oder Pfad, in dem die Daten gespeichert werden sollen.

Source code in toolboxv2/mods/DB/local_instance.py
123
124
125
126
127
128
129
130
131
132
133
134
def save_to_json(data, filename):
    """
    Save the given data to a JSON file.

    The file is created if it does not exist and overwritten otherwise.

    :param data: The data to serialize (must be JSON-serializable).
    :param filename: Name or path of the destination file.
    """
    # Mode 'w' creates or truncates the file itself, so the previous
    # "touch with open(filename, 'a')" pre-create step was dead code;
    # the read capability of 'w+' was also never used.
    with open(filename, 'w') as file:
        json.dump(data, file, indent=4)

reddis_instance

sync_redis_databases(source_url, target_url)

Synchronize keys from the source Redis database to the target Redis database. This function scans all keys in the source DB and uses DUMP/RESTORE to replicate data to the target.

Parameters:

Name Type Description Default
source_url str

The Redis URL of the source database.

required
target_url str

The Redis URL of the target database.

required

Returns:

Name Type Description
int

The number of keys successfully synchronized.

Source code in toolboxv2/mods/DB/reddis_instance.py
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
def sync_redis_databases(source_url, target_url):
    """Copy every key from one Redis instance to another.

    Keys are discovered with SCAN and transferred via DUMP/RESTORE, so values
    of any Redis type are replicated verbatim (existing target keys are
    replaced, TTL is set to 0 i.e. no expiry).

    Args:
        source_url (str): Redis URL of the database to read from.
        target_url (str): Redis URL of the database to write to.

    Returns:
        int: Number of keys that were successfully transferred.
    """
    try:
        reader = redis.from_url(source_url)
        writer = redis.from_url(target_url)
    except Exception as e:
        print(f"Error connecting to one of the Redis instances: {e}")
        return 0

    total_synced = 0
    cursor = 0
    try:
        finished = False
        while not finished:
            cursor, batch = reader.scan(cursor=cursor, count=100)
            # SCAN signals completion with cursor 0; process this final batch first.
            finished = cursor == 0
            for key in batch:
                try:
                    payload = reader.dump(key)
                    if payload is None:
                        continue
                    writer.restore(key, 0, payload, replace=True)
                    total_synced += 1
                except Exception as e:
                    # Per-key failures are logged and skipped; the sync continues.
                    print(f"Error syncing key {key}: {e}")
    except Exception as scan_error:
        print(f"Error during scanning keys: {scan_error}")

    print(f"Synced {total_synced} keys from {source_url} to {target_url}")
    return total_synced

tb_adapter

DB

Bases: ABC

Source code in toolboxv2/mods/DB/tb_adapter.py
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
class DB(ABC):
    """Abstract key-value database interface.

    Concrete adapters (e.g. local, blob or redis backed implementations)
    must provide all of the operations below; each returns a ``Result``
    except ``if_exist``, which returns a truth value.
    """
    @abc.abstractmethod
    def get(self, query: str) -> Result:
        """get data"""

    @abc.abstractmethod
    def set(self, query: str, value) -> Result:
        """set data"""

    @abc.abstractmethod
    def append_on_set(self, query: str, value) -> Result:
        """append set data"""

    @abc.abstractmethod
    def delete(self, query: str, matching=False) -> Result:
        """delete data"""

    @abc.abstractmethod
    def if_exist(self, query: str) -> bool:
        """return True if query exists"""

    @abc.abstractmethod
    def exit(self) -> Result:
        """Close DB connection and optional save data"""
append_on_set(query, value) abstractmethod

append set data

Source code in toolboxv2/mods/DB/tb_adapter.py
64
65
66
# Abstract hook: concrete DB adapters must merge `value` into the data at `query`.
@abc.abstractmethod
def append_on_set(self, query: str, value) -> Result:
    """append set data"""
delete(query, matching=False) abstractmethod

delete data

Source code in toolboxv2/mods/DB/tb_adapter.py
68
69
70
# Abstract hook: remove `query`; `matching=True` enables pattern-based deletion.
@abc.abstractmethod
def delete(self, query: str, matching=False) -> Result:
    """delete data"""
exit() abstractmethod

Close DB connection and optional save data

Source code in toolboxv2/mods/DB/tb_adapter.py
76
77
78
# Abstract hook: shut down the backend cleanly (may persist data first).
@abc.abstractmethod
def exit(self) -> Result:
    """Close DB connection and optional save data"""
get(query) abstractmethod

get data

Source code in toolboxv2/mods/DB/tb_adapter.py
56
57
58
# Abstract hook: fetch the data stored under `query`.
@abc.abstractmethod
def get(self, query: str) -> Result:
    """get data"""
if_exist(query) abstractmethod

return True if query exists

Source code in toolboxv2/mods/DB/tb_adapter.py
72
73
74
# Abstract hook: existence test for `query`.
@abc.abstractmethod
def if_exist(self, query: str) -> bool:
    """return True if query exists"""
set(query, value) abstractmethod

set data

Source code in toolboxv2/mods/DB/tb_adapter.py
60
61
62
# Abstract hook: store `value` under `query`.
@abc.abstractmethod
def set(self, query: str, value) -> Result:
    """set data"""

ui

api_change_mode(self, request) async

Changes the database mode from a JSON POST body.

Source code in toolboxv2/mods/DB/ui.py
 94
 95
 96
 97
 98
 99
100
101
@export(mod_name=Name, name="api_change_mode", api=True, api_methods=['POST'], request_as_kwarg=True)
async def api_change_mode(self, request: RequestData):
    """Changes the database mode from a JSON POST body."""
    data = request.body
    if not data or "mode" not in data:
        return Result.default_user_error("Request body must contain 'mode'.")
    # The "LC" default is unreachable: presence of "mode" is guaranteed above.
    new_mode = data.get("mode", "LC")
    # NOTE(review): `crate` looks like a typo for `create` — confirm against
    # the DatabaseModes API before renaming; callers may rely on this name.
    return self.edit_programmable(DatabaseModes.crate(new_mode))
api_delete_key(self, request) async

Deletes a key from a JSON POST body.

Source code in toolboxv2/mods/DB/ui.py
82
83
84
85
86
87
88
89
90
91
@export(mod_name=Name, name="api_delete_key", api=True, api_methods=['POST'], request_as_kwarg=True)
async def api_delete_key(self, request: RequestData):
    """Deletes a key from a JSON POST body."""
    payload = request.body
    # Distinguish a missing 'key' field from a present-but-empty one, so the
    # client receives a precise error message for each case.
    if not payload or 'key' not in payload:
        return Result.default_user_error("Request body must contain 'key'.")
    target = payload['key']
    if not target:
        return Result.default_user_error("Key parameter is required.")
    return self.delete(target)
api_get_all_keys(self, request) async

Returns a list of all keys in the database.

Source code in toolboxv2/mods/DB/ui.py
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
@export(mod_name=Name, name="api_get_all_keys", api=True, request_as_kwarg=True)
async def api_get_all_keys(self, request: RequestData):
    """Returns a list of all keys in the database."""
    # Guard clause: no backing database means nothing to list.
    if not self.data_base:
        return Result.default_internal_error("DB not initialized")

    keys_result = self.data_base.get('all-k')
    if keys_result.is_error():
        return keys_result

    keys = _unwrap_data(keys_result.get())
    if isinstance(keys, list):
        return Result.json(data=sorted(keys))

    # Defensive: an unexpected payload shape degrades to an empty listing.
    self.app.logger.warning(f"get_all_keys did not return a list. Got: {type(keys)}")
    return Result.json(data=[])
api_get_status(self, request) async

Returns the current status of the DB manager.

Source code in toolboxv2/mods/DB/ui.py
23
24
25
26
@export(mod_name=Name, name="api_get_status", api=True, request_as_kwarg=True)
async def api_get_status(self, request: RequestData):
    """Returns the current status of the DB manager."""
    status = {"mode": self.mode}
    return Result.json(data=status)
api_get_value(self, request, key) async

Gets a value for a key and returns it as JSON-friendly text.

Source code in toolboxv2/mods/DB/ui.py
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
@export(mod_name=Name, name="api_get_value", api=True, request_as_kwarg=True)
async def api_get_value(self, request: RequestData, key: str):
    """Gets a value for a key and returns it as JSON-friendly text."""
    if not key:
        return Result.default_user_error("Key parameter is required.")

    lookup = self.get(key)
    if lookup.is_error():
        return lookup

    raw = _unwrap_data(lookup.get())

    # Bytes get a UTF-8 decode attempt; everything else (and undecodable
    # bytes) falls back to the generic str() rendering.
    if isinstance(raw, bytes):
        try:
            value_str = raw.decode('utf-8')
        except UnicodeDecodeError:
            value_str = str(raw)
    else:
        value_str = str(raw)

    # Simplified for a JSON-focused UI. The client will handle formatting.
    return Result.json(data={"key": key, "value": value_str})
api_set_value(self, request) async

Sets a key-value pair from a JSON POST body.

Source code in toolboxv2/mods/DB/ui.py
69
70
71
72
73
74
75
76
77
78
79
@export(mod_name=Name, name="api_set_value", api=True, api_methods=['POST'], request_as_kwarg=True)
async def api_set_value(self, request: RequestData):
    """Sets a key-value pair from a JSON POST body."""
    payload = request.body
    if not payload or 'key' not in payload or 'value' not in payload:
        return Result.default_user_error("Request body must contain 'key' and 'value'.")
    key, value = payload['key'], payload['value']
    # An empty-string key is present but invalid — reject it explicitly.
    if not key:
        return Result.default_user_error("Key cannot be empty.")
    return self.set(key, value)
db_manager_ui(**kwargs)

Serves the refactored, JSON-focused UI for the DB Manager.

Source code in toolboxv2/mods/DB/ui.py
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
@export(mod_name=Name, name="ui", api=True, state=False)
def db_manager_ui(**kwargs):
    """Serve the refactored, JSON-focused single-page UI for the DB Manager.

    Builds a self-contained HTML document (styles, markup, and client-side
    JavaScript) and returns it via ``Result.html``. When the application's
    web context is available it is prepended so framework scripts (TB.js)
    are loaded; otherwise the raw document is served as a fallback.

    ``**kwargs`` are accepted for framework call compatibility but unused.
    """
    # NOTE: the embedded JS consistently guards framework access with
    # optional chaining (window.TB?.ui?....) so the page degrades gracefully
    # when TB or its sub-objects are not (yet) available.
    html_content = """
    <!DOCTYPE html>
    <html lang="en">
    <head>
        <meta charset="UTF-8">
        <meta name="viewport" content="width=device-width, initial-scale=1.0">
        <title>DB Manager</title>
        <style>
            :root {
                --font-family-sans: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif;
                --font-family-mono: "SF Mono", "Menlo", "Monaco", "Courier New", Courier, monospace;
                --color-bg: #f8f9fa;
                --color-panel-bg: #ffffff;
                --color-border: #dee2e6;
                --color-text: #212529;
                --color-text-muted: #6c757d;
                --color-primary: #0d6efd;
                --color-primary-hover: #0b5ed7;
                --color-danger: #dc3545;
                --color-danger-hover: #bb2d3b;
                --color-key-folder-icon: #f7b731;
                --color-key-file-icon: #adb5bd;
                --color-key-hover-bg: #e9ecef;
                --color-key-selected-bg: #0d6efd;
                --color-key-selected-text: #ffffff;
                --shadow-sm: 0 1px 2px 0 rgba(0, 0, 0, 0.05);
                --radius: 0.375rem;
            }

            /* Basic styles */
            * { box-sizing: border-box; }
            html { font-size: 16px; }

            body {
                font-family: var(--font-family-sans);
                background-color: var(--color-bg);
                color: var(--color-text);
                margin: 0;
                padding: 1rem;
                display: flex;
                flex-direction: column;
                height: 100vh;
            }

            /* Main layout */
            .db-manager-container { display: flex; flex-direction: column; height: 100%; gap: 1rem; }
            .db-header { display: flex; justify-content: space-between; align-items: center; padding-bottom: 1rem; border-bottom: 1px solid var(--color-border); flex-shrink: 0; }
            .db-main-content { display: flex; gap: 1rem; flex: 1; min-height: 0; }

            /* Panels */
            .db-panel { background-color: var(--color-panel-bg); border: 1px solid var(--color-border); border-radius: var(--radius); box-shadow: var(--shadow-sm); display: flex; flex-direction: column; min-height: 0; }
            .key-panel { width: 350px; min-width: 250px; max-width: 450px; }
            .editor-panel, .placeholder-panel { flex-grow: 1; }
            .panel-header { display: flex; justify-content: space-between; align-items: center; padding: 0.75rem 1rem; border-bottom: 1px solid var(--color-border); flex-shrink: 0; }
            .panel-header h2 { font-size: 1.1rem; margin: 0; white-space: nowrap; overflow: hidden; text-overflow: ellipsis; }

            /* Controls */
            select, input[type="text"], textarea, button { font-size: 1rem; }
            select, input[type="text"] { background-color: var(--color-bg); color: var(--color-text); border: 1px solid var(--color-border); border-radius: var(--radius); padding: 0.5rem 0.75rem; }
            select:focus, input[type="text"]:focus, textarea:focus { outline: 2px solid var(--color-primary); outline-offset: -1px; }
            button { border: none; border-radius: var(--radius); padding: 0.5rem 1rem; font-weight: 500; cursor: pointer; transition: background-color 0.2s; }
            button.primary { background-color: var(--color-primary); color: white; }
            button.primary:hover { background-color: var(--color-primary-hover); }
            button.danger { background-color: var(--color-danger); color: white; }
            button.danger:hover { background-color: var(--color-danger-hover); }
            .header-actions { display: flex; gap: 0.5rem; }

            /* Key Tree View */
            #keySearchInput { width: calc(100% - 2rem); margin: 1rem; flex-shrink: 0; }
            .key-tree-container { font-family: var(--font-family-mono); font-size: 0.9rem; padding: 0 0.5rem 1rem; overflow-y: auto; flex: 1; min-height: 0; }
            .key-tree-container ul { list-style: none; padding-left: 0; margin: 0; }
            .key-tree-container li { padding-left: 20px; position: relative; }
            .node-label { display: flex; align-items: center; padding: 4px 8px; cursor: pointer; border-radius: 4px; word-break: break-all; user-select: none; }
            .node-label:hover { background-color: var(--color-key-hover-bg); }
            .node-label.selected { background-color: var(--color-key-selected-bg); color: var(--color-key-selected-text); }
            .node-label.selected .node-icon { color: var(--color-key-selected-text) !important; }
            .node-icon { width: 20px; text-align: center; margin-right: 5px; flex-shrink: 0; }
            .tree-folder > .node-label .node-icon { color: var(--color-key-folder-icon); font-style: normal; }
            .tree-folder > .node-label .node-icon::before { content: '▸'; display: inline-block; transition: transform 0.15s ease-in-out; }
            .tree-folder.open > .node-label .node-icon::before { transform: rotate(90deg); }
            .tree-leaf > .node-label .node-icon { color: var(--color-key-file-icon); }
            .tree-leaf > .node-label .node-icon::before { content: '•'; }
            .tree-children { display: none; }
            .tree-folder.open > .tree-children { display: block; }

            /* Editor Panel */
            .editor-toolbar { display: flex; gap: 1rem; align-items: center; padding: 0.75rem 1rem; border-bottom: 1px solid var(--color-border); flex-shrink: 0; }
            #valueEditor { flex: 1; width: 100%; min-height: 0; border: none; resize: none; font-family: var(--font-family-mono); font-size: 0.95rem; line-height: 1.5; padding: 1rem; background: transparent; color: var(--color-text); }
            #valueEditor:focus { outline: none; }

            /* Placeholder and Utility */
            .placeholder-panel { display: flex; flex-direction: column; align-items: center; justify-content: center; color: var(--color-text-muted); text-align: center; }
            .hidden { display: none !important; }
            .key-tree-container p.status-message { padding: 1rem; margin: 0; color: var(--color-text-muted); text-align: center; }

            /* Custom Scrollbars */
            .key-tree-container::-webkit-scrollbar, #valueEditor::-webkit-scrollbar { width: 8px; height: 8px; }
            .key-tree-container::-webkit-scrollbar-track, #valueEditor::-webkit-scrollbar-track { background: transparent; }
            .key-tree-container::-webkit-scrollbar-thumb, #valueEditor::-webkit-scrollbar-thumb { background-color: var(--color-border); border-radius: 4px; }
            .key-tree-container::-webkit-scrollbar-thumb:hover, #valueEditor::-webkit-scrollbar-thumb:hover { background-color: var(--color-text-muted); }
            #valueEditor::-webkit-scrollbar-corner { background: transparent; }

            /* Responsive */
            @media (max-width: 768px) {
                body { padding: 0.5rem; }
                .db-main-content { flex-direction: column; }
                .key-panel { width: 100%; max-height: 40vh; }
            }
        </style>
    </head>
    <body>
        <div id="dbManagerContainer" class="db-manager-container">
            <header class="db-header">
                <h1>DB Manager</h1>
                <div class="db-mode-selector">
                    <label for="modeSelect">Mode:</label>
                    <select id="modeSelect">
                        <option value="LC">Local Dict</option>
                        <option value="CB">Cloud Blob</option>
                        <option value="LR">Local Redis</option>
                        <option value="RR">Remote Redis</option>
                    </select>
                </div>
            </header>
            <main class="db-main-content">
                <aside id="keyPanel" class="db-panel key-panel">
                    <div class="panel-header">
                        <h2>Keys</h2>
                        <div class="header-actions">
                            <button id="addKeyBtn" title="Add New Key" style="font-size: 1.2rem;">+</button>
                            <button id="refreshKeysBtn" title="Refresh Keys">🔄</button>
                        </div>
                    </div>
                    <input type="text" id="keySearchInput" placeholder="Search keys...">
                    <div id="keyTreeContainer" class="key-tree-container"></div>
                </aside>
                <section id="editorPanel" class="db-panel editor-panel hidden">
                    <div class="panel-header">
                        <h2 id="selectedKey"></h2>
                        <div class="header-actions">
                            <button id="saveBtn" class="primary">Save</button>
                            <button id="deleteBtn" class="danger">Delete</button>
                        </div>
                    </div>
                    <div class="editor-toolbar">
                        <button id="formatBtn">Format JSON</button>
                    </div>
                    <textarea id="valueEditor" placeholder="Select a key to view its value..."></textarea>
                </section>
                <section id="placeholderPanel" class="db-panel editor-panel placeholder-panel">
                    <h3>Select a key to get started</h3>
                    <p>Or click the '+' button to add a new one.</p>
                </section>
            </main>
        </div>
        <script>
        (() => {
            "use strict";
            const API_NAME = "DB";

            class DBManager {
                constructor() {
                    this.cache = {
                        keys: [],
                        selectedKey: null
                    };
                    this.dom = {
                        modeSelect: document.getElementById('modeSelect'),
                        keySearchInput: document.getElementById('keySearchInput'),
                        keyTreeContainer: document.getElementById('keyTreeContainer'),
                        editorPanel: document.getElementById('editorPanel'),
                        placeholderPanel: document.getElementById('placeholderPanel'),
                        selectedKey: document.getElementById('selectedKey'),
                        valueEditor: document.getElementById('valueEditor'),
                        addKeyBtn: document.getElementById('addKeyBtn'),
                        refreshKeysBtn: document.getElementById('refreshKeysBtn'),
                        saveBtn: document.getElementById('saveBtn'),
                        deleteBtn: document.getElementById('deleteBtn'),
                        formatBtn: document.getElementById('formatBtn'),
                    };
                    this.init();
                }

                async init() {
                    this.setStatusMessage('Loading...');
                    this.addEventListeners();
                    await this.loadInitialStatus();
                    await this.loadKeys();
                }

                addEventListeners() {
                    this.dom.refreshKeysBtn.addEventListener('click', () => this.loadKeys());
                    this.dom.addKeyBtn.addEventListener('click', () => this.showAddKeyModal());
                    this.dom.saveBtn.addEventListener('click', () => this.saveValue());
                    this.dom.deleteBtn.addEventListener('click', () => this.confirmDeleteKey());
                    this.dom.formatBtn.addEventListener('click', () => this.formatJson());
                    this.dom.keySearchInput.addEventListener('input', (e) => this.renderKeyTree(e.target.value));
                    this.dom.modeSelect.addEventListener('change', (e) => this.changeMode(e.target.value));

                    this.dom.keyTreeContainer.addEventListener('click', (e) => {
                        const label = e.target.closest('.node-label');
                        if (!label) return;
                        const node = label.parentElement;
                        if (node.classList.contains('tree-folder')) {
                            node.classList.toggle('open');
                        } else if (node.dataset.key) {
                            this.selectKey(node.dataset.key);
                        }
                    });
                }

                async apiRequest(endpoint, payload = null, method = 'POST') {
                    if (!window.TB?.api?.request) {
                        console.error("TB.api not available!");
                        return { error: true, message: "TB.api not available" };
                    }
                    try {
                        const url = (method === 'GET' && payload) ? `${endpoint}?${new URLSearchParams(payload)}` : endpoint;
                        const body = (method !== 'GET') ? payload : null;
                        const response = await window.TB.api.request(API_NAME, url, body, method);

                        if (response.error && response.error !== 'none') {
                            const errorMsg = response.info?.help_text || response.error;
                            console.error(`API Error on ${endpoint}:`, errorMsg, response);
                            if (window.TB?.ui?.Toast) TB.ui.Toast.showError(errorMsg, { duration: 5000 });
                            return { error: true, message: errorMsg, data: response.get() };
                        }
                        return { error: false, data: response.get() };
                    } catch (err) {
                        console.error("Framework/Network Error:", err);
                        if (window.TB?.ui?.Toast) TB.ui.Toast.showError("Application or network error.", { duration: 5000 });
                        return { error: true, message: "Network error" };
                    }
                }

                async loadInitialStatus() {
                    const res = await this.apiRequest('api_get_status', null, 'GET');
                    if (!res.error) this.dom.modeSelect.value = res.data.mode;
                }

                async loadKeys() {
                    this.setStatusMessage('Loading keys...');
                    const res = await this.apiRequest('api_get_all_keys', null, 'GET');
                    if (!res.error) {
                        this.cache.keys = res.data || [];
                        this.renderKeyTree();
                    } else {
                        this.setStatusMessage('Failed to load keys.', true);
                    }
                }

                renderKeyTree(filter = '') {
                    const treeData = {};
                    const filteredKeys = this.cache.keys.filter(k => k.toLowerCase().includes(filter.toLowerCase().trim()));

                    for (const key of filteredKeys) {
                        let currentLevel = treeData;
                        const parts = key.split(':');
                        for (let i = 0; i < parts.length; i++) {
                            const part = parts[i];
                            if (!part) continue; // Skip empty parts from keys like "a::b"
                            const isLeaf = i === parts.length - 1;

                            if (!currentLevel[part]) {
                                currentLevel[part] = { _children: {} };
                            }
                            if (isLeaf) {
                                currentLevel[part]._fullKey = key;
                            }
                            currentLevel = currentLevel[part]._children;
                        }
                    }

                    const treeHtml = this.buildTreeHtml(treeData);
                    if (treeHtml) {
                        this.dom.keyTreeContainer.innerHTML = `<ul class="key-tree">${treeHtml}</ul>`;
                        // Re-select the key if it's still visible
                        if (this.cache.selectedKey) {
                             const nodeEl = this.dom.keyTreeContainer.querySelector(`[data-key="${this.cache.selectedKey}"] .node-label`);
                             if(nodeEl) nodeEl.classList.add('selected');
                        }
                    } else {
                         this.setStatusMessage(filter ? 'No keys match your search.' : 'No keys found.');
                    }
                }

                buildTreeHtml(node) {
                    return Object.keys(node).sort().map(key => {
                        const childNode = node[key];
                        const isFolder = Object.keys(childNode._children).length > 0;

                        if (isFolder) {
                            return `<li class="tree-folder" ${childNode._fullKey ? `data-key="${childNode._fullKey}"`: ''}>
                                        <div class="node-label"><i class="node-icon"></i>${key}</div>
                                        <ul class="tree-children">${this.buildTreeHtml(childNode._children)}</ul>
                                    </li>`;
                        } else {
                            return `<li class="tree-leaf" data-key="${childNode._fullKey}">
                                        <div class="node-label"><i class="node-icon"></i>${key}</div>
                                    </li>`;
                        }
                    }).join('');
                }

                async selectKey(key) {
                    if (!key) return;
                    this.showEditor(true);
                    this.cache.selectedKey = key;

                    document.querySelectorAll('.node-label.selected').forEach(el => el.classList.remove('selected'));
                    const nodeEl = this.dom.keyTreeContainer.querySelector(`[data-key="${key}"] > .node-label`);
                    if (nodeEl) nodeEl.classList.add('selected');

                    this.dom.selectedKey.textContent = key;
                    this.dom.selectedKey.title = key;
                    this.dom.valueEditor.value = "Loading...";

                    const res = await this.apiRequest('api_get_value', { key }, 'GET');
                    this.dom.valueEditor.value = res.error ? `Error: ${res.message}` : res.data.value;
                    if (!res.error) this.formatJson(false); // Auto-format if it's valid JSON, without showing an error
                }

                async saveValue() {
                    if (!this.cache.selectedKey) return;
                    if (window.TB?.ui?.Loader) TB.ui.Loader.show("Saving...");
                    const res = await this.apiRequest('api_set_value', {
                        key: this.cache.selectedKey,
                        value: this.dom.valueEditor.value
                    });
                    if (window.TB?.ui?.Loader) TB.ui.Loader.hide();
                    if (!res.error && window.TB?.ui?.Toast) TB.ui.Toast.showSuccess("Key saved successfully!");
                }

                async confirmDeleteKey() {
                    if (!this.cache.selectedKey) return;
                    if (!window.TB?.ui?.Modal) {
                        if(confirm(`Delete key "${this.cache.selectedKey}"?`)) this.deleteKey();
                        return;
                    }
                    TB.ui.Modal.confirm({
                        title: 'Delete Key?',
                        content: `Are you sure you want to delete the key "<strong>${this.cache.selectedKey}</strong>"?<br/>This action cannot be undone.`,
                        confirmButtonText: 'Delete',
                        confirmButtonVariant: 'danger',
                        onConfirm: () => this.deleteKey()
                    });
                }

                async deleteKey() {
                    const keyToDelete = this.cache.selectedKey;
                    if (!keyToDelete) return;
                    if (window.TB?.ui?.Loader) TB.ui.Loader.show("Deleting...");
                    const res = await this.apiRequest('api_delete_key', { key: keyToDelete });
                    if (window.TB?.ui?.Loader) TB.ui.Loader.hide();

                    if (!res.error) {
                        if (window.TB?.ui?.Toast) TB.ui.Toast.showSuccess(`Key "${keyToDelete}" deleted.`);
                        this.cache.selectedKey = null;
                        this.showEditor(false);
                        this.loadKeys(); // Refresh the key list
                    }
                }

                formatJson(showErrorToast = true) {
                    try {
                        const currentVal = this.dom.valueEditor.value.trim();
                        if (!currentVal) return;
                        const formatted = JSON.stringify(JSON.parse(currentVal), null, 2);
                        this.dom.valueEditor.value = formatted;
                    } catch (e) {
                        if (showErrorToast && window.TB?.ui?.Toast) {
                            TB.ui.Toast.showWarning("Value is not valid JSON.", { duration: 3000 });
                        }
                    }
                }

                showAddKeyModal() {
                     if (!window.TB?.ui?.Modal) { alert("Add Key modal not available."); return; }
                     TB.ui.Modal.show({
                        title: 'Add New Key',
                        content: `<input type="text" id="newKeyInput" placeholder="Enter new key name (e.g., app:settings:user)" style="width: 100%; margin-bottom: 1rem;"/>
                                  <textarea id="newValueInput" placeholder='Enter value (e.g., {"theme": "dark"})' style="width: 100%; height: 150px; font-family: var(--font-family-mono);"></textarea>`,
                        onOpen: (modal) => document.getElementById('newKeyInput').focus(),
                        buttons: [{
                            text: 'Save', variant: 'primary',
                            action: async (modal) => {
                                const newKey = document.getElementById('newKeyInput').value.trim();
                                const newValue = document.getElementById('newValueInput').value;
                                if (!newKey) { if (window.TB?.ui?.Toast) TB.ui.Toast.showError("Key name cannot be empty."); return; }
                                modal.close();
                                if (window.TB?.ui?.Loader) TB.ui.Loader.show("Saving...");
                                const res = await this.apiRequest('api_set_value', { key: newKey, value: newValue });
                                if (window.TB?.ui?.Loader) TB.ui.Loader.hide();
                                if (!res.error) {
                                    if (window.TB?.ui?.Toast) TB.ui.Toast.showSuccess("New key created!");
                                    await this.loadKeys();
                                    this.selectKey(newKey);
                                }
                            }
                        }, { text: 'Cancel', action: (modal) => modal.close() }]
                    });
                }

                async changeMode(newMode) {
                    if (window.TB?.ui?.Loader) TB.ui.Loader.show(`Switching to ${newMode}...`);
                    const res = await this.apiRequest('api_change_mode', { mode: newMode });
                    if (!res.error) {
                       this.cache.selectedKey = null;
                       this.showEditor(false);
                       await this.loadKeys();
                       if (window.TB?.ui?.Toast) TB.ui.Toast.showSuccess(`Switched to ${newMode} mode.`);
                    } else {
                       if (window.TB?.ui?.Toast) TB.ui.Toast.showError(`Failed to switch mode.`);
                       await this.loadInitialStatus(); // Revert dropdown to actual status
                    }
                    if (window.TB?.ui?.Loader) TB.ui.Loader.hide();
                }

                showEditor(show) {
                    this.dom.editorPanel.classList.toggle('hidden', !show);
                    this.dom.placeholderPanel.classList.toggle('hidden', show);
                }

                setStatusMessage(message, isError = false) {
                    this.dom.keyTreeContainer.innerHTML = `<p class="status-message" style="${isError ? 'color: var(--color-danger);' : ''}">${message}</p>`;
                }
            }

            // Defer initialization until the ToolboxV2 framework is ready

             function onTbReady() { new DBManager(); }
             if (window.TB?.events) {
    if (window.TB.config?.get('appRootId')) { // A sign that TB.init might have run
         onTbReady();
    } else {
        window.TB.events.on('tbjs:initialized', onTbReady, { once: true });
    }
} else {
    // Fallback if TB is not even an object yet, very early load
    document.addEventListener('tbjs:initialized', onTbReady, { once: true }); // Custom event dispatch from TB.init
}

        })();
        </script>
    </body>
    </html>
    """
    app = get_app(Name)
    try:
        # Prepend the web context to include necessary framework scripts (like TB.js)
        web_context = app.web_context()
        return Result.html(web_context + html_content)
    except Exception:
        # Fallback in case web_context is not available
        return Result.html(html_content)

EventManager

module

EventManagerClass
Source code in toolboxv2/mods/EventManager/module.py
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
class EventManagerClass:
    """Routes :class:`Event` objects between this node and remote peers.

    The manager keeps outgoing client proxies in ``routes_client`` and
    listening daemons in ``routers_servers``.  An :class:`EventID` carries a
    colon-separated ``source`` route; when its last hop equals this node's
    ``source_id`` the event is executed locally, otherwise it is forwarded to
    the matching client route.

    NOTE(review): ``events``, ``routes_client``, ``routers_servers`` and
    ``routers_servers_tasks`` are class-level mutable defaults, so they are
    shared between ALL instances — confirm this singleton-like sharing is
    intended.
    """

    events: set[Event] = set()  # all locally registered events (class-shared, see note above)
    source_id: str  # identity of this node; terminal hop for local execution
    _name: str  # identification plus a uuid fragment; appended to routing paths
    _identification: str  # logical role, e.g. "P0", "P0|S0", default "PN"

    routes_client: dict[str, ProxyRout] = {}  # outgoing connections keyed by peer name
    routers_servers: dict[str, DaemonRout] = {}  # listening daemons keyed by source id
    routers_servers_tasks: list[Any] = []  # awaitables driving the daemons
    routers_servers_tasks_running_flag: bool = False  # True while server_route_runner thread is alive

    receiver_que: queue.Queue  # inbound EventIDs consumed by the receiver loop
    response_que: queue.Queue  # results surfaced back to local callers

    def add_c_route(self, name, route: ProxyRout):
        """Register an outgoing client route under *name*."""
        self.routes_client[name] = route

    async def receive_all_client_data(self):
        """Poll every client route once and dispatch whatever it produced.

        Dead clients are collected first and removed after the loop; newly
        received ``Event`` objects are likewise registered after the loop so
        ``routes_client`` is not mutated while being iterated.
        """
        close_connections = []
        add_ev = []
        for name, client in self.routes_client.items():
            if client.client is None or not client.client.get('alive', False):
                close_connections.append(name)
                continue
            data = client.r

            if isinstance(data, str) and data == "No data":
                # Sentinel meaning "nothing pending" — see add_mini_client.
                continue
            elif isinstance(data, EventID) and len(data.get_source()) != 0:
                # Route not exhausted yet: keep triggering/forwarding.
                await self.trigger_event(data)
            elif isinstance(data, EventID) and len(data.get_source()) == 0:
                # Fully routed EventID: it is a response for a local caller.
                print(f"Event returned {data.payload}")
                self.response_que.put(data)
            elif isinstance(data,
                            dict) and 'error' in data and 'origin' in data and 'result' in data and 'info' in data:
                # Serialized Result dict coming back from a peer.
                self.response_que.put(Result.result_from_dict(**data).print())
            elif isinstance(data,
                            dict) and 'source' in data and 'path' in data and 'ID' in data and 'identifier' in data:
                # Serialized EventID dict; 'identifier' is not an EventID field.
                del data['identifier']
                ev_id = EventID(**data)
                await self.trigger_event(ev_id)
            elif isinstance(data, Event):
                print("Event:", str(data.event_id), data.name)
                add_ev.append(data)
            elif isinstance(data, Result):
                self.response_que.put(data.print())
            else:
                print(f"Unknown Data {data}")

        for ev in add_ev:
            await self.register_event(ev)

        for client_name in close_connections:
            print(f"Client {client_name} closing connection")
            self.remove_c_route(client_name)

    def remove_c_route(self, name):
        """Close and forget the client route *name*."""
        self.routes_client[name].close()
        del self.routes_client[name]

    def crate_rout(self, source, addr=None):
        """Build a :class:`Rout` from this node to *source* at *addr*.

        (sic: method name keeps the project's existing "crate" spelling.)
        NOTE(review): the ``TOOLBOXV2_BASE_PORT`` getenv default is the int
        6588, not a string — confirm downstream int() handling is consistent.
        """
        if addr is None:
            addr = ('0.0.0.0', 6588)
        host, port = addr
        if isinstance(port, str):
            port = int(port)
        return Rout(
            _from=self.source_id,
            _to=source,
            _from_port=int(os.getenv("TOOLBOXV2_BASE_PORT", 6588)),
            _from_host=os.getenv("TOOLBOXV2_BASE_HOST"),
            _to_port=port,
            _to_host=host,
            routing_function=self.routing_function_router,
        )

    def __init__(self, source_id, _identification="PN"):
        # bo: one-shot "bootstrapped" flag used by register_event to avoid
        # opening the P0 route more than once.
        self.bo = False
        self.running = False
        self.source_id = source_id
        self.receiver_que = queue.Queue()
        self.response_que = queue.Queue()
        self._identification = _identification
        # Unique display name: "<identification>-<uuid fragment>".
        self._name = self._identification + '-' + str(uuid.uuid4()).split('-')[1]
        self.routes = {}
        self.logger = get_logger()

    @property
    def identification(self) -> str:
        """Logical role of this node (e.g. "P0")."""
        return self._identification

    @identification.setter
    def identification(self, _identification: str):
        # Changing identity stops all current connections; callers are expected
        # to re-start / reconnect (see identity_post_setter).
        self.stop()
        self._identification = _identification
        self._name = self._identification + '-' + str(uuid.uuid4()).split('-')[1]

    async def identity_post_setter(self):
        """Re-open well-known server ports after an identity change."""
        do_reconnect = len(list(self.routers_servers.keys())) > 0
        if self._identification == "P0":
            await self.add_server_route(self._identification, ('0.0.0.0', 6568))
        if self._identification == "P0|S0":
            await self.add_server_route(self._identification, ('0.0.0.0', 6567))

        await asyncio.sleep(0.1)
        self.start()
        await asyncio.sleep(0.1)
        if do_reconnect:
            self.reconnect("ALL")

    async def open_connection_server(self, port):
        """Open a listening daemon for this identification on *port*."""
        await self.add_server_route(self._identification, ('0.0.0.0', port))

    def start(self):
        """Start the background receiver loop in a daemon thread."""
        self.running = True
        threading.Thread(target=async_test(self.receiver), daemon=True).start()

    def make_event_from_fuction(self, fuction, name, *args, source_types=SourceTypes.F,
                                scope=Scope.local,
                                exec_in=ExecIn.local,
                                threaded=False, **kwargs):
        """Wrap a callable into an :class:`Event` with a fresh EventID.

        (sic: method/parameter spelling kept for interface compatibility.)
        """
        return Event(source=fuction,
                     name=name,
                     event_id=EventID.crate_with_source(self.source_id), args=args,
                     kwargs_=kwargs,
                     source_types=source_types,
                     scope=scope,
                     exec_in=exec_in,
                     threaded=threaded,
                     )

    async def add_client_route(self, source_id, addr):
        """Connect (or reconnect) a client route to *source_id* at *addr*.

        Returns True on a live connection, False when already connected or the
        remote server is unreachable.
        """
        if source_id in self.routes_client:
            if self.routes_client[source_id].client is None or not self.routes_client[source_id].client.get('alive'):
                await self.routes_client[source_id].reconnect()
                return True
            print("Already connected")
            return False
        try:
            pr = await ProxyRout.toProxy(rout=self.crate_rout(source_id, addr=addr), name=source_id)
            await asyncio.sleep(0.1)
            # Handshake: announce our identification and shared key.
            await pr.client.get('sender')({"id": self._identification,
                                           "continue": False,
                                           "key": os.getenv('TB_R_KEY', 'root@remote')})
            await asyncio.sleep(0.1)
            self.add_c_route(source_id, pr)
            return True
        except Exception as e:
            print(f"Check the port {addr} Sever likely not Online : {e}")
            return False

    async def add_mini_client(self, name: str, addr: tuple[str, int]):
        """Create a send-only pseudo-route for a peer that reached our server.

        The mini client never connects itself; put_data is redirected through
        our own server daemon to *addr*, and all lifecycle methods are no-ops.
        """
        mini_proxy = await ProxyRout(class_instance=None, timeout=15, app=get_app(),
                                     remote_functions=[""], peer=False, name=name, do_connect=False)

        async def _(x):
            return await self.routers_servers[self._identification].send(x, addr)

        mini_proxy.put_data = _
        mini_proxy.connect = lambda *x, **_: None
        mini_proxy.reconnect = lambda *x, **_: None
        mini_proxy.close = lambda *x, **_: None
        mini_proxy.client = {'alive': True}
        mini_proxy.r = "No data"  # matches the "nothing pending" sentinel
        self.routes_client[name] = mini_proxy

    async def on_register(self, id_, data):
        """Server-side callback: register a newly connected client.

        NOTE(review): ``eval(id_)`` parses the "(host, port)" repr coming from
        the socket layer — this evaluates remote-supplied text; consider
        ``ast.literal_eval`` instead.
        """
        try:
            if "unknown" not in self.routes:
                self.routes["unknown"] = {}

            if id_ != "new_con" and 'id' in data:
                id_data = data.get('id')
                id_ = eval(id_)
                c_host, c_pot = id_
                print(f"Registering: new client {id_data} : {c_host, c_pot}")
                if id_data not in self.routes_client:
                    await self.add_mini_client(id_data, (c_host, c_pot))
                    self.routes[str((c_host, c_pot))] = id_data

            # print("self.routes:", self.routes)
        except Exception as e:
            print("Error in on_register", str(e))

    def on_client_exit(self, id_):
        """Drop the route belonging to a disconnecting client address.

        NOTE(review): the address string is eval()'d to a tuple, but routes
        were stored under str((host, port)) — confirm the lookup key matches.
        """
        if isinstance(id_, str):
            id_ = eval(id_)

        c_name = self.routes.get(id_)

        if c_name is None:
            return

        if c_name in self.routes_client:
            self.remove_c_route(c_name)
            print(f"Removed route to {c_name}")

    async def add_server_route(self, source_id, addr=None):
        """Start a listening daemon for *source_id* and schedule its task."""
        if addr is None:
            addr = ('0.0.0.0', 6588)
        try:
            self.routers_servers[source_id] = await DaemonRout(rout=self.crate_rout(source_id, addr=addr),
                                                               name=source_id,
                                                               on_r=self.on_register)
            self.routers_servers_tasks.append(self.routers_servers[source_id].online)
        except Exception as e:
            print(f"Sever already Online : {e}")

        # Lazily start the single thread that drives all daemon tasks.
        if not self.routers_servers_tasks_running_flag:
            self.routers_servers_tasks_running_flag = True
            threading.Thread(target=self.server_route_runner, daemon=True).start()

    def server_route_runner(self):
        """Thread target: run all server tasks on a private event loop."""
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

        # Gather all results together.
        results = loop.run_until_complete(asyncio.gather(*self.routers_servers_tasks))

        for result in results:
            print(result)

        loop.close()
        self.routers_servers_tasks_running_flag = False

    async def add_js_route(self, source_id="js:web"):
        """Expose a unix-socket server route for the web/JS frontend."""
        await self.add_server_route(source_id, ("./web/scripts/tb_socket.sock", 0))

    async def register_event(self, event: Event):
        """Register *event* locally and open the routes its scope requires.

        instance       -> nothing to connect.
        local          -> connect to P0 when a remote base host is configured.
        local_network  -> P0 broadcasts; other nodes search for P0 or use the
                          configured base host.
        global_network -> additionally open a public server route.
        """
        if event in self.events:
            return Result.default_user_error("Event registration failed Event already registered")

        print(f"Registration new Event : {event.name}, {str(event.event_id)}")
        self.events.add(event)

        if event.scope.name == Scope.instance.name:
            return

        if event.scope.name == Scope.local.name:
            if not self.bo and "P0" not in self.routes_client and os.getenv("TOOLBOXV2_BASE_HOST",
                                                                            "localhost") != "localhost":
                await self.add_client_route("P0", (os.getenv("TOOLBOXV2_BASE_HOST", "localhost"),
                                                   os.getenv("TOOLBOXV2_BASE_PORT", 6568)))
                self.bo = True
            return

        if event.scope.name == Scope.local_network.name:
            if self.identification == "P0" and not self.bo:
                # We ARE the router: answer discovery broadcasts.
                t0 = threading.Thread(target=self.start_brodcast_router_local_network, daemon=True)
                t0.start()
            elif not self.bo and "P0" not in self.routes_client and os.getenv("TOOLBOXV2_BASE_HOST",
                                                                              "localhost") == "localhost":
                self.bo = True
                # self.add_server_route(self.identification, ("127.0.0.1", 44667))
                with Spinner(message="Sercheing for Rooter instance", count_down=True, time_in_s=6):
                    with ThreadPoolExecutor(max_workers=1) as executor:
                        t0 = executor.submit(make_known, self.identification)
                        try:
                            data = t0.result(timeout=6)
                        except TimeoutError:
                            print("No P0 found in network or on device")
                            return
                    print(f"Found P0 on {type(data)} {data.get('host')}")
                    await self.add_client_route("P0", (data.get("host"), os.getenv("TOOLBOXV2_BASE_PORT", 6568)))
            elif not self.bo and "P0" not in self.routes_client and os.getenv("TOOLBOXV2_BASE_HOST",
                                                                              "localhost") != "localhost":
                do = await self.add_client_route("P0", (
                    os.getenv("TOOLBOXV2_BASE_HOST", "localhost"), os.getenv("TOOLBOXV2_BASE_PORT", 6568)))
                self.bo = do
                if not do:
                    print("Connection failed")
                    # Fall back to local discovery on the next attempt.
                    os.environ["TOOLBOXV2_BASE_HOST"] = "localhost"

        if event.scope.name == Scope.global_network.name:
            await self.add_server_route(self.source_id, ('0.0.0.0', os.getenv("TOOLBOXV2_REMOTE_PORT", 6587)))

    async def connect_to_remote(self, host=os.getenv("TOOLBOXV2_REMOTE_IP"),
                                port=os.getenv("TOOLBOXV2_REMOTE_PORT", 6587)):
        """Open the "S0" client route to a remote toolbox instance."""
        await self.add_client_route("S0", (host, port))

    def start_brodcast_router_local_network(self):
        """Blocking loop (run in a thread): answer LAN discovery requests.

        Drives the ``start_client`` generator until ``self.running`` goes
        False, recording each discovered peer in ``self.routes``.
        """
        self.bo = True

        # print("Starting brodcast router 0")
        router = start_client(get_local_ip())
        # print("Starting brodcast router 1")
        # next(router)
        # print("Starting brodcast router")
        while self.running:
            source_id, connection = next(router)
            print(f"Infos :{source_id}, connection :{connection}")
            self.routes[source_id] = connection[0]
            router.send(self.running)

        router.send("e")
        router.close()

    def _get_event_by_id_or_name(self, event_id: str or EventID):
        """Resolve *event_id* (name, EventID or Event) to a registered Event.

        Returns a ``Result`` error object instead of raising when no match is
        found — callers must isinstance-check the return value.
        """
        if isinstance(event_id, str):
            events = [e for e in self.events if e.name == event_id]
            if len(events) < 1:
                return Result.default_user_error("Event not registered")
            event = events[0]

        elif isinstance(event_id, EventID):
            events = [e for e in self.events if e.event_id.ID == event_id.ID]
            if len(events) < 1:
                # Fall back: an EventID created from a bare name carries the
                # event name in its ID field.
                events = [e for e in self.events if e.name == event_id.ID]
            if len(events) < 1:
                return Result.default_user_error("Event not registered")
            event = events[0]

        elif isinstance(event_id, Event):
            if event_id not in self.events:
                return Result.default_user_error("Event not registered")
            event = event_id

        else:
            event = Result.default_user_error("Event not registered")

        return event

    def remove_event(self, event: Event or EventID or str):
        """Unregister an event; returns the lookup error Result on failure."""
        event = self._get_event_by_id_or_name(event)
        if isinstance(event, Event):
            self.events.remove(event)
        else:
            return event

    async def _trigger_local(self, event_id: EventID):
        """Execute *event_id* on this node, or forward it to P0.

        If the event is not registered here and we are not P0, a "P0" client
        route is opened on demand and the EventID is routed onward instead of
        being executed.
        """
        event = self._get_event_by_id_or_name(event_id)

        if isinstance(event, Result):
            # Lookup failed locally.
            event.print()
            if self.identification == "P0":
                return event
            print(f"Routing to P0 {self.events}")
            if self.source_id not in self.routes_client:
                # self.routers[self.source_id] = DaemonRout(rout=self.crate_rout(self.source_id))
                await self.add_client_route("P0", ('127.0.0.1', 6568))
            return await self.route_event_id(event_id)

        # if event.threaded:
        #    threading.Thread(target=self.runner, args=(event, event_id), daemon=True).start()
        #    return "Event running In Thread"
        # else:

        return await self.runner(event, event_id)

    async def runner(self, event, event_id: EventID):
        """Invoke the event's source according to its SourceTypes.

        P/AP  -> (async) callable taking ``payload=event_id``
        F/AF  -> plain (async) callable
        R/AR  -> toolbox module function run via ``get_app().run_any``
        S     -> string evaluated with app/event/manager in scope

        NOTE(review): the ``is`` comparisons on ``.name`` strings rely on
        CPython string interning; ``==`` would be the safe spelling.
        """
        if event.kwargs_ is None:
            event.kwargs_ = {}
        if event.args is None:
            event.args = []

        if event.source_types.name is SourceTypes.P.name:
            return event.source(*event.args, payload=event_id, **event.kwargs_)

        if event.source_types.name is SourceTypes.F.name:
            return event.source(*event.args, **event.kwargs_)

        if event.source_types.name is SourceTypes.R.name:
            return get_app(str(event_id)).run_any(mod_function_name=event.source, get_results=True, args_=event.args,
                                                  kwargs_=event.kwargs_)

        if event.source_types.name is SourceTypes.AP.name:
            if 'payload' in event.kwargs_:
                # Explicit payload kwarg overrides the EventID's payload.
                if event_id.payload != event.kwargs_['payload']:
                    event_id.payload = event.kwargs_['payload']
                del event.kwargs_['payload']
            print(event.args, event.kwargs_, "TODO: remove")
            return await event.source(*event.args, payload=event_id, **event.kwargs_)

        if event.source_types.name is SourceTypes.AF.name:
            return await event.source(*event.args, **event.kwargs_)

        if event.source_types.name is SourceTypes.AR.name:
            return await get_app(str(event_id)).run_any(mod_function_name=event.source, get_results=True,
                                                        args_=event.args,
                                                        kwargs_=event.kwargs_)

        if event.source_types.name is SourceTypes.S.name:
            # NOTE(review): builtin eval() accepts no keyword arguments, so
            # passing ``__locals=`` here should raise TypeError — confirm the
            # S source type is actually exercised anywhere.
            return eval(event.source, __locals={'app': get_app(str(event_id)), 'event': event, 'eventManagerC': self})

    async def routing_function_router(self, event_id: EventID):
        """Daemon-side entry point: trigger the event and normalize the reply.

        Always returns bytes/dict/Result suitable for the wire; a returned
        EventID means the event could not be resolved anywhere.
        """
        result = await self.trigger_event(event_id)

        if result is None:
            result = Result.default_user_error("Invalid Event ID")

        if isinstance(result, bytes | dict):
            pass
        elif isinstance(result, Result):
            result.result.data_info = str(event_id)
        elif isinstance(result, EventID):
            result = Result.default_internal_error("Event not found", data=result)
        else:
            result = Result.ok(data=result, data_info="<automatic>", info=str(event_id.path))

        if isinstance(result, str):
            result = result.encode()

        return result

    async def trigger_evnet_by_name(self, name: str):
        """Trigger an event by its registered name. (sic: method spelling.)"""
        await self.trigger_event(EventID.crate_name_as_id(name=name))

    async def trigger_event(self, event_id: EventID):
        """Execute *event_id* locally when this node is its final source hop,
        otherwise forward it along its route.

        After a local execution the payload is attached to the EventID and, if
        the EventID accumulated a return path, the result is routed back to
        the originator.  Returns the local payload, or the routing result.
        """
        # print(f"event-id Ptah : {event_id.get_path()}")
        # print(f"testing trigger_event for {event_id.get_source()} {event_id.get_source()[-1] == self.source_id} ")
        print(str(event_id))
        if event_id.get_source()[-1] == self.source_id:
            payload = await self._trigger_local(event_id)
            event_id.set_payload(payload)
            if len(event_id.path) > 1:
                # Reverse the accumulated path into a return route.
                event_id.source = ':'.join([e.split(':')[0] for e in event_id.get_path() if e != "E"])
                res = await self.route_event_id(event_id)
                if isinstance(res, Result):
                    res.print()
                else:
                    print(res)
            return payload
        return await self.route_event_id(event_id)

    async def route_event_id(self, event_id: EventID):
        """Forward *event_id* one hop toward its destination.

        A trailing '*' source hop broadcasts to every client route not already
        on the path.  When no route matches, the EventID is returned with a
        404 marker appended to its path.
        """
        # print(f"testing route_event_id for {event_id.get_source()[-1]}")
        if event_id.get_source()[-1] == '*':  # self.identification == "P0" and
            responses = []
            event_id.source = ':'.join(event_id.get_source()[:-1])
            event_id.add_path(f"{self._name}({self.source_id})")
            data = asdict(event_id)
            for name, rout_ in self.routes_client.items():
                if name in event_id.path:
                    continue
                ret = await rout_.put_data(data)
                responses.append(ret)
            return responses
        route = self.routes_client.get(event_id.get_source()[-1])
        # print("route:", route)
        if route is None:
            route = self.routes_client.get(event_id.get_path()[-1])
        if route is None:
            return event_id.add_path(("" if len(event_id.get_source()) == 1 else "404#")+self.identification)
        # NOTE(review): blocking sleep inside a coroutine stalls the loop —
        # asyncio.sleep would be the non-blocking equivalent.
        time.sleep(0.25)
        event_id.source = ':'.join(event_id.get_source()[:-1])
        event_id.add_path(f"{self._name}({self.source_id})")
        return await route.put_data(asdict(event_id))

    async def receiver(self):
        """Background loop: drain ``receiver_que`` and poll clients every 5s."""
        t0 = time.time()

        while self.running:
            time.sleep(0.25)  # NOTE(review): blocking sleep in async loop
            if not self.receiver_que.empty():
                event_id = self.receiver_que.get()
                print("Receiver Event", str(event_id))
                await self.trigger_event(event_id)

            if time.time() - t0 > 5:
                await self.receive_all_client_data()
                t0 = time.time()

    def info(self):
        """Return a snapshot dict of routes, servers and registered events."""
        return {"source": self.source_id, "known_routs:": self.routers_servers, "_router": self.routes_client,
                "events": self.events}

    def stop(self):
        """Stop the receiver loop and tear down all client/server routes."""
        self.running = False
        list(map(lambda x: x.disconnect(), self.routes_client.values()))
        list(map(lambda x: x.stop(), self.routers_servers.values()))

    def reconnect(self, name):
        """Reconnect one client route by *name*, or all when unknown/None."""
        if name is None:
            pass
        elif name in self.routes_client:
            self.routes_client[name].reconnect()
            return
        list(map(lambda x: x.reconnect(), self.routes_client.values()))

    async def verify(self, name):
        """Verify one client route by *name*, or all when unknown/None."""
        if name is None:
            pass
        elif name in self.routes_client:
            await self.routes_client[name].verify()
            return
        for x in self.routes_client.values():
            await x.verify()
trigger_event(event_id) async

Exec source based on

source_types: F -> call directly; R -> use get_app(str(event_id)).run_any(*args, **kwargs); S -> evaluate string. scope: instance -> _trigger_local; local -> if you are a proxy app, run the event through get_app(str(event_id)).run_any(TBEF.EventManager._trigger_local, args=args, kwargs=kwargs, get_result=True); local_network -> use the proxy0 app to communicate with Daemon0, then local; global_network -> exec_in, event_id, threaded

Source code in toolboxv2/mods/EventManager/module.py
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
async def trigger_event(self, event_id: EventID):
    """Execute *event_id* locally when this node is its final source hop,
    otherwise forward it along its route.

    After a local execution the payload is attached to the EventID and, if
    the EventID accumulated a return path, the result is routed back to
    the originator.  Returns the local payload, or the routing result.
    """
    # print(f"event-id Ptah : {event_id.get_path()}")
    # print(f"testing trigger_event for {event_id.get_source()} {event_id.get_source()[-1] == self.source_id} ")
    print(str(event_id))
    if event_id.get_source()[-1] == self.source_id:
        payload = await self._trigger_local(event_id)
        event_id.set_payload(payload)
        if len(event_id.path) > 1:
            # Reverse the accumulated path into a return route.
            event_id.source = ':'.join([e.split(':')[0] for e in event_id.get_path() if e != "E"])
            res = await self.route_event_id(event_id)
            if isinstance(res, Result):
                res.print()
            else:
                print(res)
        return payload
    return await self.route_event_id(event_id)
Rout dataclass
Source code in toolboxv2/mods/EventManager/module.py
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
@dataclass
class Rout:
    """One routing edge between two event-manager nodes.

    Field order is part of the generated ``__init__`` signature — do not
    reorder.
    """

    _from: str  # source node id
    _to: str  # destination node id

    _from_port: int  # local port
    _from_host: str  # local host

    _to_port: int  # remote port
    _to_host: str  # remote host

    routing_function: Callable  # coroutine invoked with a reconstructed EventID

    @property
    def to_host(self):
        """Remote host of this route."""
        return self._to_host

    @property
    def to_port(self):
        """Remote port of this route."""
        return self._to_port

    async def put_data(self, event_id_data: dict[str, str]):
        """Rebuild an EventID from its dict form and hand it to the router."""
        event_id: EventID = EventID(**event_id_data)
        return await self.routing_function(event_id)

    def close(self):
        """Close the route; the default implementation is a no-op."""
close()

Close

Source code in toolboxv2/mods/EventManager/module.py
165
166
def close(self):
    """Close the route; the default implementation is a no-op."""

FileWidget

FileUploadHandler

Source code in toolboxv2/mods/FileWidget.py
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
class FileUploadHandler:
    """Receives chunked uploads, buffers chunks locally, merges into BlobStorage."""

    def __init__(self, upload_dir: str = 'uploads'):
        # Local staging directory for chunk parts; created eagerly.
        self.upload_dir = Path(upload_dir)
        self.upload_dir.mkdir(parents=True, exist_ok=True)
        # self.app = get_app().app # If logger is needed here

    def save_file(self, chunk_info: ChunkInfo, storage: BlobStorage) -> str:
        """Store the file or chunk. Chunks are buffered locally, then merged into BlobStorage.

        Returns the final path of the file inside *storage* (basename only).
        """
        final_blob_path = Path(chunk_info.filename).name  # Use only filename part for security within blob storage

        if chunk_info.total_chunks == 1:
            # Single-part upload: write the complete file straight to BlobStorage.
            # print(f"Saving single part file: {final_blob_path} to BlobStorage directly.") # Debug
            with BlobFile(final_blob_path, 'w', storage=storage) as bf:
                bf.write(chunk_info.content)
        else:
            # Multi-part: stage this chunk locally.
            # Sanitize filename for local path (original chunk_info.filename might contain path parts client-side)
            safe_base_filename = "".join(
                c if c.isalnum() or c in ('.', '_', '-') else '_' for c in Path(chunk_info.filename).name)
            chunk_path = self.upload_dir / f"{safe_base_filename}.part{chunk_info.chunk_index}"
            # print(f"Saving chunk: {chunk_path} locally. Total chunks: {chunk_info.total_chunks}") # Debug

            with open(chunk_path, 'wb') as f:
                f.write(chunk_info.content)

            if self._all_chunks_received(safe_base_filename, chunk_info.total_chunks):
                # print(f"All chunks received for {safe_base_filename}. Merging to BlobStorage path: {final_blob_path}") # Debug
                self._merge_chunks_to_blob(safe_base_filename, chunk_info.total_chunks, final_blob_path, storage)
                self._cleanup_chunks(safe_base_filename, chunk_info.total_chunks)
            # else:
            # print(f"Still waiting for more chunks for {safe_base_filename}.") # Debug

        return final_blob_path  # Path within BlobStorage

    def _all_chunks_received(self, safe_base_filename: str, total_chunks: int) -> bool:
        """Return True when every .partN file (0..total_chunks-1) exists locally."""
        for i in range(total_chunks):
            chunk_path = self.upload_dir / f"{safe_base_filename}.part{i}"
            if not chunk_path.exists():
                # print(f"Chunk {i} for {safe_base_filename} not found. Path: {chunk_path}") # Debug
                return False
        # print(f"All {total_chunks} chunks found for {safe_base_filename}.") # Debug
        return True

    def _merge_chunks_to_blob(self, safe_base_filename: str, total_chunks: int, final_blob_path: str,
                              storage: BlobStorage):
        """Concatenate all local chunk parts, in index order, into one blob."""
        # print(f"Merging {total_chunks} chunks for {safe_base_filename} into Blob: {final_blob_path}") # Debug
        with BlobFile(final_blob_path, 'w', storage=storage) as outfile:
            for i in range(total_chunks):
                chunk_path = self.upload_dir / f"{safe_base_filename}.part{i}"
                # print(f"Appending chunk {i} ({chunk_path}) to Blob.") # Debug
                with open(chunk_path, 'rb') as chunk_file:
                    outfile.write(chunk_file.read())
        # print(f"Finished merging chunks for {safe_base_filename} to Blob: {final_blob_path}") # Debug

    def _cleanup_chunks(self, safe_base_filename: str, total_chunks: int):
        """Delete the local .partN staging files; removal errors are logged only."""
        # print(f"Cleaning up {total_chunks} chunks for {safe_base_filename}.") # Debug
        for i in range(total_chunks):
            chunk_path = self.upload_dir / f"{safe_base_filename}.part{i}"
            if chunk_path.exists():
                # print(f"Removing chunk: {chunk_path}") # Debug
                try:
                    os.remove(chunk_path)
                except OSError as e:
                    # self.app.logger.error(f"Error removing chunk {chunk_path}: {e}") # If logger available
                    print(f"Error removing chunk {chunk_path}: {e}")
save_file(chunk_info, storage)

Stores the file or a chunk of it. Chunks are buffered locally, then merged into BlobStorage.

Source code in toolboxv2/mods/FileWidget.py
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
def save_file(self, chunk_info: ChunkInfo, storage: BlobStorage) -> str:
    """Store the file or chunk. Chunks are buffered locally, then merged into BlobStorage.

    Returns the final path of the file inside *storage* (basename only).
    """
    final_blob_path = Path(chunk_info.filename).name  # Use only filename part for security within blob storage

    if chunk_info.total_chunks == 1:
        # Single-part upload: write the complete file straight to BlobStorage.
        # print(f"Saving single part file: {final_blob_path} to BlobStorage directly.") # Debug
        with BlobFile(final_blob_path, 'w', storage=storage) as bf:
            bf.write(chunk_info.content)
    else:
        # Multi-part: stage this chunk locally.
        # Sanitize filename for local path (original chunk_info.filename might contain path parts client-side)
        safe_base_filename = "".join(
            c if c.isalnum() or c in ('.', '_', '-') else '_' for c in Path(chunk_info.filename).name)
        chunk_path = self.upload_dir / f"{safe_base_filename}.part{chunk_info.chunk_index}"
        # print(f"Saving chunk: {chunk_path} locally. Total chunks: {chunk_info.total_chunks}") # Debug

        with open(chunk_path, 'wb') as f:
            f.write(chunk_info.content)

        if self._all_chunks_received(safe_base_filename, chunk_info.total_chunks):
            # print(f"All chunks received for {safe_base_filename}. Merging to BlobStorage path: {final_blob_path}") # Debug
            self._merge_chunks_to_blob(safe_base_filename, chunk_info.total_chunks, final_blob_path, storage)
            self._cleanup_chunks(safe_base_filename, chunk_info.total_chunks)
        # else:
        # print(f"Still waiting for more chunks for {safe_base_filename}.") # Debug

    return final_blob_path  # Path within BlobStorage

access_shared_file(self, request, share_id, filename=None, row=None) async

Accesses a shared file via its share_id. The URL for this looks like /api/FileWidget/shared/{share_id_value}; the 'share_id: str' parameter in the signature implies that ToolBoxV2 extracts it from the path.

Source code in toolboxv2/mods/FileWidget.py
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
@export(mod_name=MOD_NAME, api=True, version=VERSION, name="open_shared", api_methods=['GET'],
        request_as_kwarg=True, level=-1, row=True)
async def access_shared_file(self, request: RequestData, share_id: str, filename: str = None, row=None) -> Result:
    """
    Accesses a shared file via its share_id.

    The URL for this would be like /api/FileWidget/shared/{share_id_value}.
    The 'share_id: str' in signature implies ToolBoxV2 extracts it from path.

    Args:
        request: Incoming request data (injected via request_as_kwarg).
        share_id: Identifier of the share link to resolve.
        filename: Unused here; present for URL/signature compatibility.
        row: When not None, the file response is prepared in raw mode.

    Returns:
        Result carrying the prepared file response, or an HTML error page.
    """
    if not share_id:
        # BUG FIX: a missing path parameter is a client error. The previous
        # status=302 (Found) is a redirect status with no Location target.
        return Result.html(data="Share ID is missing in path.", status=400)

    share_info = self.shares.get(share_id) if self.shares is not None else None
    if not share_info:
        return Result.html(data="Share link is invalid or has expired.", status=404)

    owner_uid = share_info["owner_uid"]
    file_path_in_owner_storage = share_info["file_path"]

    try:
        # Get BlobStorage for the owner, not the current request's user (if any).
        owner_storage = await self.get_blob_storage(
            owner_uid_override=owner_uid)  # Crucially, pass request=None if not needed
        self.app.logger.info(
            f"Accessing shared file via link {share_id}: owner {owner_uid}, path {file_path_in_owner_storage}")
        result = await _prepare_file_response(self, owner_storage, file_path_in_owner_storage, row=row is not None)
        if result.is_error():
            self.app.logger.error(f"Error preparing shared file response for {share_id}: {result.info.help_text}")
            return Result.html(data=f"Failed to prepare shared file for download. {result.info.help_text} {result.result.data_info}")
        return result
    except ValueError as e:  # From get_blob_storage if owner_uid is invalid for some reason
        self.app.logger.error(f"Error getting owner's storage for shared file {share_id} (owner {owner_uid}): {e}",
                              exc_info=True)
        return Result.html(data="Could not access owner's storage for shared file.")
    except Exception as e:
        self.app.logger.error(
            f"Error accessing shared file {share_id} (owner {owner_uid}, path {file_path_in_owner_storage}): {e}",
            exc_info=True)
        return Result.html(data="Could not retrieve shared file.")

get_main_ui(self) async

Serves the main HTML UI for the FileWidget.

Source code in toolboxv2/mods/FileWidget.py
598
599
600
601
602
@export(mod_name=MOD_NAME, api=True, version=VERSION, name="ui", api_methods=['GET'])
async def get_main_ui(self) -> Result:
    """Serves the main HTML UI for the FileWidget."""
    # Render the widget template and wrap it as an HTML result.
    return Result.html(data=get_template_content())

handle_upload(self, request, form_data=None) async

Handles file uploads. Expects chunked data via form_data kwarg from Rust server. 'form_data' structure (from Rust's parsing of multipart) after client sends FormData with fields: 'file' (the blob), 'fileName', 'chunkIndex', 'totalChunks'.

Expected form_data in this Python function: { "file": { // This 'file' key is the NAME of the form field that held the file blob "filename": "original_file_name_for_this_chunk.txt", // from Content-Disposition of the 'file' field part "content_type": "mime/type_of_chunk", "content_base64": "BASE64_ENCODED_CHUNK_CONTENT" }, "fileName": "overall_final_filename.txt", // From a separate form field named 'fileName' "chunkIndex": "0", // From a separate form field named 'chunkIndex' "totalChunks": "5" // From a separate form field named 'totalChunks' }

Source code in toolboxv2/mods/FileWidget.py
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
@export(mod_name=MOD_NAME, api=True, version=VERSION, name="upload", api_methods=['POST'], request_as_kwarg=True)
async def handle_upload(self, request: RequestData, form_data: dict[str, Any] | None = None) -> Result:
    """
    Handles file uploads. Expects chunked data via form_data kwarg from Rust server.
    'form_data' structure (from Rust's parsing of multipart) after client sends FormData with fields:
    'file' (the blob), 'fileName', 'chunkIndex', 'totalChunks'.

    Expected `form_data` in this Python function:
    {
        "file": {  // This 'file' key is the NAME of the form field that held the file blob
            "filename": "original_file_name_for_this_chunk.txt", // from Content-Disposition of the 'file' field part
            "content_type": "mime/type_of_chunk",
            "content_base64": "BASE64_ENCODED_CHUNK_CONTENT"
        },
        "fileName": "overall_final_filename.txt", // From a separate form field named 'fileName'
        "chunkIndex": "0",                        // From a separate form field named 'chunkIndex'
        "totalChunks": "5"                        // From a separate form field named 'totalChunks'
    }

    Returns:
        Result.ok with {"message": ..., "path": blob-relative path} on success,
        or a user/internal error Result on validation or processing failure.
    """
    self.app.logger.debug(
        f"FileWidget: handle_upload called. Received form_data keys: {list(form_data.keys()) if form_data else 'None'}"
    )
    self.app.logger.debug(f"FileWidget: handle_upload called. Received form_data: {request.to_dict()}")
    # self.app.logger.debug(f"Full form_data: {form_data}") # For deeper debugging if needed

    if not form_data:
        return Result.default_user_error(info="No form data received for upload.", exec_code=400)

    try:
        storage = await self.get_blob_storage(request)

        # Extract data from form_data (populated by Rust server from multipart)
        file_field_data = form_data.get('file')  # This is the dict from UploadedFile struct
        # The 'file_field_data.get('filename')' is the name of the chunk part,
        # which the JS client sets to be the same as the original file's name.
        # This is fine for FileUploadHandler.save_file's chunk_info.filename if total_chunks > 1,
        # as it will be used to create temporary part files like "original_file_name.txt.part0".

        overall_filename_from_form = form_data.get('fileName')  # This is the target filename for the assembled file.
        chunk_index_str = form_data.get('chunkIndex')
        total_chunks_str = form_data.get('totalChunks')

        if not all([
            file_field_data, isinstance(file_field_data, dict),
            overall_filename_from_form,
            chunk_index_str is not None,  # Check for presence, not just truthiness (0 is valid)
            total_chunks_str is not None  # Check for presence
        ]):
            missing = []
            if not file_field_data or not isinstance(file_field_data, dict): missing.append("'file' object field")
            if not overall_filename_from_form: missing.append("'fileName' field")
            if chunk_index_str is None: missing.append("'chunkIndex' field")
            if total_chunks_str is None: missing.append("'totalChunks' field")

            self.app.logger.error(
                f"Missing critical form data fields for upload: {missing}. Received form_data: {form_data}")
            return Result.default_user_error(info=f"Incomplete upload data. Missing: {', '.join(missing)}",
                                             exec_code=400)

        content_base64 = file_field_data.get('content_base64')
        if not content_base64:
            return Result.default_user_error(info="File content (base64) not found in 'file' field data.",
                                             exec_code=400)

        try:
            content_bytes = base64.b64decode(content_base64)
        except base64.binascii.Error as b64_error:
            self.app.logger.error(f"Base64 decoding failed for upload: {b64_error}")
            return Result.default_user_error(info="Invalid file content encoding.", exec_code=400)

        try:
            chunk_index = int(chunk_index_str)
            total_chunks = int(total_chunks_str)
        except ValueError:
            return Result.default_user_error(info="Invalid chunk index or total chunks value. Must be integers.", exec_code=400)

        # Use the 'overall_filename_from_form' for the ChunkInfo.filename,
        # as this is the intended final name in blob storage.
        # FileUploadHandler will use Path(this_name).name to ensure it's just a filename.
        chunk_info_to_save = ChunkInfo(
            filename=overall_filename_from_form,
            chunk_index=chunk_index,
            total_chunks=total_chunks,
            content=content_bytes
        )

        self.app.logger.info(
            f"Processing chunk {chunk_index + 1}/{total_chunks} for final file '{overall_filename_from_form}'. "
            f"Size: {len(content_bytes)} bytes."
        )

        saved_blob_path = self.upload_handler.save_file(chunk_info_to_save, storage)  # blob-relative path

        msg = f"Chunk {chunk_index + 1}/{total_chunks} for '{saved_blob_path}' saved."
        if chunk_info_to_save.chunk_index == chunk_info_to_save.total_chunks - 1:
            # Check if fully assembled. The 'safe_base_filename' in FileUploadHandler
            # is derived from ChunkInfo.filename (== overall_filename_from_form),
            # so this mirrors its sanitization exactly.
            safe_base_filename_for_check = "".join(
                c if c.isalnum() or c in ('.', '_', '-') else '_' for c in Path(overall_filename_from_form).name)

            # A slight delay might be needed if file system operations are not
            # instantly consistent across threads/processes; direct check assumed okay.

            if self.upload_handler._all_chunks_received(safe_base_filename_for_check, total_chunks):
                msg = f"File '{saved_blob_path}' upload complete and assembled."
                self.app.logger.info(msg)
            else:
                msg = f"Final chunk for '{saved_blob_path}' saved, but assembly check failed or is pending."
                self.app.logger.warning(msg + f" (Could not verify all chunks for '{safe_base_filename_for_check}' immediately after final one)")

        return Result.ok(data={"message": msg, "path": saved_blob_path})  # Return the blob-relative path

    except ValueError as e:
        self.app.logger.error(f"Upload processing error: {e}", exc_info=True)
        # BUG FIX: both branches previously returned 400, making the
        # authentication check a no-op; auth failures now map to 401.
        return Result.default_user_error(info=f"Upload error: {str(e)}",
                                         exec_code=401 if "authentication" in str(e).lower() else 400)
    except Exception as e:
        self.app.logger.error(f"Unexpected error during file upload: {e}", exc_info=True)
        return Result.default_internal_error(info="An unexpected error occurred during upload.")

P2PRPCClient

P2PRPCClient

Source code in toolboxv2/mods/P2PRPCClient.py
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
class P2PRPCClient:
    """Encrypted RPC client that talks to a local tcm peer over TCP.

    Wire protocol: every message is a 4-byte big-endian length prefix
    followed by a symmetrically encrypted UTF-8 payload.
    """

    def __init__(self, app: App, host: str, port: int, tb_r_key: str = None):
        self.app = app
        self.host = host
        self.port = port
        self.reader = None
        self.writer = None
        self.futures = {}  # call_id -> asyncio.Future awaiting the matching response
        self.code = Code()
        # Keep a handle on the background listener so it is neither
        # garbage-collected nor left running after close().
        self._listener_task = None

        if tb_r_key is None:
            tb_r_key = os.getenv("TB_R_KEY")
            if tb_r_key is None:
                raise ValueError("TB_R_KEY environment variable is not set.")

        if len(tb_r_key) < 24:
            raise ValueError("TB_R_KEY must be at least 24 characters long for security.")
        # First 24 chars secure the key exchange; the remainder identifies
        # this client to the server's access-control layer.
        self.auth_key_part = tb_r_key[:24]
        self.identification_part = tb_r_key[24:]
        self.session_key = None

    async def connect(self):
        """Connects to the local tcm instance and performs key exchange."""
        try:
            self.reader, self.writer = await asyncio.open_connection(self.host, self.port)
            print(f"RPC Client: Connected to tcm at {self.host}:{self.port}")

            # Receive encrypted session key from server
            len_data = await self.reader.readexactly(4)
            encrypted_session_key_len = int.from_bytes(len_data, 'big')
            encrypted_session_key = (await self.reader.readexactly(encrypted_session_key_len)).decode('utf-8')

            # Decrypt session key using auth_key_part
            self.session_key = self.code.decrypt_symmetric(encrypted_session_key, self.auth_key_part)

            # Send challenge back to server, encrypted with session key
            challenge = "CHALLENGE_ACK"
            encrypted_challenge = self.code.encrypt_symmetric(challenge, self.session_key)
            self.writer.write(len(encrypted_challenge).to_bytes(4, 'big'))
            self.writer.write(encrypted_challenge.encode('utf-8'))
            await self.writer.drain()

            # BUG FIX: store the task reference; fire-and-forget tasks may be
            # garbage-collected mid-flight if nothing holds them.
            self._listener_task = asyncio.create_task(self.listen_for_responses())

        except ConnectionRefusedError:
            print(f"RPC Client: Connection to {self.host}:{self.port} refused. Is the tcm peer running?")
            raise
        except Exception as e:
            print(f"RPC Client: Error during connection/key exchange: {e}")
            raise

    async def listen_for_responses(self):
        """Listens for incoming responses, decrypts them, and resolves the corresponding future."""
        try:
            while True:
                len_data = await self.reader.readexactly(4)
                msg_len = int.from_bytes(len_data, 'big')
                encrypted_msg_data = (await self.reader.readexactly(msg_len)).decode('utf-8')

                decrypted_msg_data = self.code.decrypt_symmetric(encrypted_msg_data, self.session_key)
                response = json.loads(decrypted_msg_data)

                # Match the response to its pending call; unknown ids are dropped.
                call_id = response.get('call_id')
                if call_id in self.futures:
                    future = self.futures.pop(call_id)
                    future.set_result(response)
        except asyncio.IncompleteReadError:
            print("RPC Client: Connection closed.")
        except Exception as e:
            print(f"RPC Client: Error listening for responses: {e}")
        finally:
            # Fail any pending futures so callers don't hang forever.
            for future in self.futures.values():
                future.set_exception(ConnectionError("Connection lost"))
            self.futures.clear()

    async def call(self, module: str, function: str, *args, **kwargs):
        """Makes a remote procedure call.

        Returns a Result: Result.ok(...) with the remote value, a Result
        built from the remote error dict, or an internal error on
        timeout/transport failure (30s timeout).
        """
        if not self.writer:
            # Lazily establish the connection on first use.
            await self.connect()

        call_id = str(uuid.uuid4())
        request = {
            "type": "request",
            "call_id": call_id,
            "module": module,
            "function": function,
            "args": args,
            "kwargs": kwargs,
            "identification_part": self.identification_part
        }

        future = asyncio.get_running_loop().create_future()
        self.futures[call_id] = future

        try:
            request_str = json.dumps(request)
            encrypted_request = self.code.encrypt_symmetric(request_str, self.session_key)

            self.writer.write(len(encrypted_request).to_bytes(4, 'big'))
            self.writer.write(encrypted_request.encode('utf-8'))
            await self.writer.drain()

            # Wait for the response with a timeout
            response = await asyncio.wait_for(future, timeout=30.0)

            if response.get('error'):
                return Result(**response['error'])
            else:
                return Result.ok(response.get('result'))

        # BUG FIX: on Python < 3.11 asyncio.wait_for raises asyncio.TimeoutError,
        # which is NOT the builtin TimeoutError; catch both for compatibility.
        except (TimeoutError, asyncio.TimeoutError):
            self.futures.pop(call_id, None)
            return Result.default_internal_error("RPC call timed out.")
        except Exception as e:
            self.futures.pop(call_id, None)
            return Result.default_internal_error(f"RPC call failed: {e}")

    async def close(self):
        """Closes the connection and stops the background listener task."""
        if self._listener_task is not None:
            # Cancelling triggers the listener's cleanup of pending futures.
            self._listener_task.cancel()
            self._listener_task = None
        if self.writer:
            self.writer.close()
            await self.writer.wait_closed()
            print("RPC Client: Connection closed.")
call(module, function, *args, **kwargs) async

Makes a remote procedure call.

Source code in toolboxv2/mods/P2PRPCClient.py
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
async def call(self, module: str, function: str, *args, **kwargs):
    """Makes a remote procedure call.

    The request is JSON-serialized, encrypted with the session key and
    written as a length-prefixed frame.  The matching response is
    delivered by `listen_for_responses` through a future keyed on a
    fresh call id, with a 30-second timeout.

    Args:
        module: Name of the remote module to invoke.
        function: Name of the function within that module.
        *args: Positional arguments forwarded to the remote function.
        **kwargs: Keyword arguments forwarded to the remote function.

    Returns:
        A Result: Result.ok(...) with the remote value, a Result built
        from the remote error dict, or a default internal error on
        timeout or transport failure.
    """
    if not self.writer:
        # Lazily establish the connection on first use.
        await self.connect()

    call_id = str(uuid.uuid4())
    request = {
        "type": "request",
        "call_id": call_id,
        "module": module,
        "function": function,
        "args": args,
        "kwargs": kwargs,
        "identification_part": self.identification_part
    }

    # Register the future before sending so the listener can resolve it
    # even if the response arrives immediately.
    future = asyncio.get_running_loop().create_future()
    self.futures[call_id] = future

    try:
        request_str = json.dumps(request)
        encrypted_request = self.code.encrypt_symmetric(request_str, self.session_key)

        # Frame: 4-byte big-endian length prefix, then encrypted payload.
        self.writer.write(len(encrypted_request).to_bytes(4, 'big'))
        self.writer.write(encrypted_request.encode('utf-8'))
        await self.writer.drain()

        # Wait for the response with a timeout
        response = await asyncio.wait_for(future, timeout=30.0)

        if response.get('error'):
            return Result(**response['error'])
        else:
            return Result.ok(response.get('result'))

    # NOTE(review): asyncio.wait_for raises asyncio.TimeoutError, which is the
    # builtin TimeoutError only on Python >= 3.11 — confirm target version.
    except TimeoutError:
        self.futures.pop(call_id, None)
        return Result.default_internal_error("RPC call timed out.")
    except Exception as e:
        self.futures.pop(call_id, None)
        return Result.default_internal_error(f"RPC call failed: {e}")
close() async

Closes the connection.

Source code in toolboxv2/mods/P2PRPCClient.py
133
134
135
136
137
138
async def close(self):
    """Closes the connection."""
    # Nothing to do when no connection was ever established.
    if not self.writer:
        return
    self.writer.close()
    await self.writer.wait_closed()
    print("RPC Client: Connection closed.")
connect() async

Connects to the local tcm instance and performs key exchange.

Source code in toolboxv2/mods/P2PRPCClient.py
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
async def connect(self):
    """Connects to the local tcm instance and performs key exchange.

    Handshake sequence (order matters; mirrors the server side):
      1. Read a 4-byte big-endian length, then the encrypted session key.
      2. Decrypt the session key with the 24-char auth part of TB_R_KEY.
      3. Send back "CHALLENGE_ACK" encrypted under the session key.
      4. Spawn a background task that resolves response futures.

    Raises:
        ConnectionRefusedError: If no tcm peer is listening on host:port.
        Exception: Any error during the exchange is printed and re-raised.
    """
    try:
        self.reader, self.writer = await asyncio.open_connection(self.host, self.port)
        print(f"RPC Client: Connected to tcm at {self.host}:{self.port}")

        # Receive encrypted session key from server
        len_data = await self.reader.readexactly(4)
        encrypted_session_key_len = int.from_bytes(len_data, 'big')
        encrypted_session_key = (await self.reader.readexactly(encrypted_session_key_len)).decode('utf-8')

        # Decrypt session key using auth_key_part
        self.session_key = self.code.decrypt_symmetric(encrypted_session_key, self.auth_key_part)

        # Send challenge back to server, encrypted with session key
        challenge = "CHALLENGE_ACK"
        encrypted_challenge = self.code.encrypt_symmetric(challenge, self.session_key)
        self.writer.write(len(encrypted_challenge).to_bytes(4, 'big'))
        self.writer.write(encrypted_challenge.encode('utf-8'))
        await self.writer.drain()

        # Start a background task to listen for responses
        # NOTE(review): the task reference is not stored; asyncio may
        # garbage-collect fire-and-forget tasks — consider keeping a handle.
        asyncio.create_task(self.listen_for_responses())

    except ConnectionRefusedError:
        print(f"RPC Client: Connection to {self.host}:{self.port} refused. Is the tcm peer running?")
        raise
    except Exception as e:
        print(f"RPC Client: Error during connection/key exchange: {e}")
        raise
listen_for_responses() async

Listens for incoming responses, decrypts them, and resolves the corresponding future.

Source code in toolboxv2/mods/P2PRPCClient.py
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
async def listen_for_responses(self):
    """Listens for incoming responses, decrypts them, and resolves the corresponding future.

    Runs until the stream ends.  Each frame is a 4-byte big-endian length
    followed by an encrypted JSON payload; responses are matched to
    pending calls via their 'call_id'.  On exit, all still-pending
    futures are failed with ConnectionError so callers don't hang.
    """
    try:
        while True:
            len_data = await self.reader.readexactly(4)
            msg_len = int.from_bytes(len_data, 'big')
            encrypted_msg_data = (await self.reader.readexactly(msg_len)).decode('utf-8')

            decrypted_msg_data = self.code.decrypt_symmetric(encrypted_msg_data, self.session_key)
            response = json.loads(decrypted_msg_data)

            # Resolve the future of the call this response belongs to;
            # responses with unknown call_ids are silently dropped.
            call_id = response.get('call_id')
            if call_id in self.futures:
                future = self.futures.pop(call_id)
                future.set_result(response)
    except asyncio.IncompleteReadError:
        # Peer closed the connection mid-frame or between frames.
        print("RPC Client: Connection closed.")
    except Exception as e:
        print(f"RPC Client: Error listening for responses: {e}")
    finally:
        # Clean up any pending futures
        for future in self.futures.values():
            future.set_exception(ConnectionError("Connection lost"))
        self.futures.clear()

test_rpc_client(app, host='127.0.0.1', port=8000, tb_r_key=None) async

An example of how to use the P2P RPC Client.

Source code in toolboxv2/mods/P2PRPCClient.py
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
@export(mod_name=Name, name="test_rpc_client", test=False)
async def test_rpc_client(app: App, host: str = '127.0.0.1', port: int = 8000, tb_r_key: str = None):
    """An example of how to use the P2P RPC Client."""
    # Fall back to the environment when no key was passed explicitly.
    key = tb_r_key if tb_r_key is not None else os.getenv("TB_R_KEY")
    if key is None:
        raise ValueError("TB_R_KEY environment variable is not set.")

    rpc = P2PRPCClient(app, host, port, key)
    try:
        await rpc.connect()
        # Example: call the 'list-users' function from the 'helper' module.
        outcome = await rpc.call("helper", "list-users")
        outcome.print()
    finally:
        await rpc.close()

P2PRPCServer

P2PRPCServer

Source code in toolboxv2/mods/P2PRPCServer.py
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
class P2PRPCServer:
    """Encrypted RPC server accepting connections from tcm instances.

    Wire protocol mirrors P2PRPCClient: a 4-byte big-endian length prefix
    followed by a symmetrically encrypted UTF-8 JSON payload.
    """

    def __init__(self, app: App, host: str, port: int, tb_r_key: str, function_access_config: dict = None):
        self.app = app
        self.host = host
        self.port = port
        self.server = None
        self.code = Code()

        if len(tb_r_key) < 24:
            raise ValueError("TB_R_KEY must be at least 24 characters long for security.")
        # First 24 chars protect the session-key exchange; the remainder
        # identifies this server instance.
        self.auth_key_part = tb_r_key[:24]
        self.identification_part_server = tb_r_key[24:]

        # Whitelist mapping: module name -> iterable of allowed function names.
        self.function_access_config = function_access_config if function_access_config is not None else {}

    async def handle_client(self, reader, writer):
        """Callback to handle a single client connection from a tcm instance."""
        addr = writer.get_extra_info('peername')
        print(f"RPC Server: New connection from {addr}")

        # Fresh session key per connection, sent encrypted under the shared auth key.
        session_key = self.code.generate_symmetric_key()
        encrypted_session_key = self.code.encrypt_symmetric(session_key, self.auth_key_part)

        try:
            writer.write(len(encrypted_session_key).to_bytes(4, 'big'))
            writer.write(encrypted_session_key.encode('utf-8'))
            await writer.drain()

            # The client proves it decrypted the session key by echoing a challenge.
            len_data = await reader.readexactly(4)
            encrypted_challenge_len = int.from_bytes(len_data, 'big')
            encrypted_challenge = (await reader.readexactly(encrypted_challenge_len)).decode('utf-8')

            decrypted_challenge = self.code.decrypt_symmetric(encrypted_challenge, session_key)
            if decrypted_challenge != "CHALLENGE_ACK":
                raise ValueError("Invalid challenge received.")

            print(f"RPC Server: Authenticated client {addr}")

            # Request/response loop: one encrypted JSON frame in, one out.
            while True:
                len_data = await reader.readexactly(4)
                msg_len = int.from_bytes(len_data, 'big')

                encrypted_msg_data = (await reader.readexactly(msg_len)).decode('utf-8')

                decrypted_msg_data = self.code.decrypt_symmetric(encrypted_msg_data, session_key)

                response = await self.process_rpc(decrypted_msg_data, session_key)

                encrypted_response = self.code.encrypt_symmetric(json.dumps(response), session_key)

                writer.write(len(encrypted_response).to_bytes(4, 'big'))
                writer.write(encrypted_response.encode('utf-8'))
                await writer.drain()

        except asyncio.IncompleteReadError:
            print(f"RPC Server: Connection from {addr} closed.")
        except Exception as e:
            print(f"RPC Server: Error with client {addr}: {e}")
        finally:
            writer.close()
            await writer.wait_closed()

    async def process_rpc(self, msg_data: str, session_key: str) -> dict:
        """Processes a single RPC request and returns a response dictionary."""
        # BUG FIX: 'call' was previously unbound in the except clause when
        # json.loads itself raised, causing a NameError instead of a
        # well-formed parse-error response.
        call = {}
        try:
            parsed = json.loads(msg_data)
            if not isinstance(parsed, dict) or parsed.get('type') != 'request':
                raise ValueError("Invalid message type")
            call = parsed
        except (json.JSONDecodeError, ValueError) as e:
            return self.format_error(call.get('call_id'), -32700, f"Parse error: {e}")

        call_id = call.get('call_id')
        module = call.get('module')
        function = call.get('function')
        args = call.get('args', [])
        kwargs = call.get('kwargs', {})
        client_identification = call.get('identification_part')

        if not self.is_function_allowed(module, function, client_identification):
            error_msg = f"Function '{module}.{function}' is not allowed for identification '{client_identification}'."
            print(f"RPC Server: {error_msg}")
            return self.format_error(call_id, -32601, "Method not found or not allowed")

        print(f"RPC Server: Executing '{module}.{function}' for '{client_identification}'")
        try:
            result: Result = await self.app.a_run_any(
                (module, function),
                args_=args,
                kwargs_=kwargs,
                get_results=True
            )

            if result.is_error():
                # NOTE(review): 'result.info' is accessed mapping-style here but
                # attribute-style elsewhere in the codebase — confirm its type.
                return self.format_error(call_id, result.info.get('exec_code', -32000), result.info.get('help_text'), result.get())
            else:
                return {
                    "type": "response",
                    "call_id": call_id,
                    "result": result.get(),
                    "error": None
                }
        except Exception as e:
            print(f"RPC Server: Exception during execution of '{module}.{function}': {e}")
            return self.format_error(call_id, -32603, "Internal error during execution", str(e))

    def is_function_allowed(self, module: str, function: str, client_identification: str) -> bool:
        """Checks if a function is allowed for a given client identification."""
        if module not in self.function_access_config:
            return False

        allowed_functions_for_module = self.function_access_config[module]

        if function not in allowed_functions_for_module:
            return False

        # If the function is whitelisted, it is allowed for any authenticated
        # client.  For per-client granularity, extend function_access_config to:
        # {"ModuleName": {"function1": ["id1", "id2"], "function2": ["id3"]}}
        return True

    def format_error(self, call_id, code, message, details=None) -> dict:
        """Helper to create a JSON-RPC error response object."""
        return {
            "type": "response",
            "call_id": call_id,
            "result": None,
            "error": {
                "code": code,
                "message": message,
                "details": details
            }
        }

    async def start(self):
        """Starts the TCP server and serves until cancelled."""
        self.server = await asyncio.start_server(
            self.handle_client, self.host, self.port
        )
        addr = self.server.sockets[0].getsockname()
        print(f"P2P RPC Server listening on {addr}")
        async with self.server:
            await self.server.serve_forever()

    def stop(self):
        """Stops the TCP server."""
        if self.server:
            self.server.close()
            print("P2P RPC Server stopped.")
format_error(call_id, code, message, details=None)

Helper to create a JSON-RPC error response object.

Source code in toolboxv2/mods/P2PRPCServer.py
137
138
139
140
141
142
143
144
145
146
147
148
def format_error(self, call_id, code, message, details=None) -> dict:
    """Helper to create a JSON-RPC error response object."""
    # Build the error payload first, then wrap it in the response envelope.
    error_payload = {
        "code": code,
        "message": message,
        "details": details,
    }
    return {
        "type": "response",
        "call_id": call_id,
        "result": None,
        "error": error_payload,
    }
handle_client(reader, writer) async

Callback to handle a single client connection from a tcm instance.

Source code in toolboxv2/mods/P2PRPCServer.py
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
async def handle_client(self, reader, writer):
    """Callback to handle a single client connection from a tcm instance.

    Handshake: a fresh symmetric session key is generated, encrypted
    under the shared auth key part and sent to the client.  The client
    must answer with an encrypted "CHALLENGE_ACK" to prove it holds the
    shared secret.  After that, length-prefixed encrypted JSON messages
    are processed in a loop until the peer disconnects.
    """
    addr = writer.get_extra_info('peername')
    print(f"RPC Server: New connection from {addr}")

    # Per-connection session key, transported encrypted under the shared auth key.
    session_key = self.code.generate_symmetric_key()
    encrypted_session_key = self.code.encrypt_symmetric(session_key, self.auth_key_part)

    try:
        # Frame format everywhere below: 4-byte big-endian length, then UTF-8 payload.
        # NOTE(review): the length is the *character* count of the string; this only
        # matches the UTF-8 byte count if the ciphertext is pure ASCII - confirm.
        writer.write(len(encrypted_session_key).to_bytes(4, 'big'))
        writer.write(encrypted_session_key.encode('utf-8'))
        await writer.drain()

        # The client proves it decrypted the session key by echoing an
        # encrypted challenge acknowledgement.
        len_data = await reader.readexactly(4)
        encrypted_challenge_len = int.from_bytes(len_data, 'big')
        encrypted_challenge = (await reader.readexactly(encrypted_challenge_len)).decode('utf-8')

        decrypted_challenge = self.code.decrypt_symmetric(encrypted_challenge, session_key)
        if decrypted_challenge != "CHALLENGE_ACK":
            raise ValueError("Invalid challenge received.")

        print(f"RPC Server: Authenticated client {addr}")

        # Main request loop: one encrypted JSON-RPC message per frame.
        while True:
            len_data = await reader.readexactly(4)
            msg_len = int.from_bytes(len_data, 'big')

            encrypted_msg_data = (await reader.readexactly(msg_len)).decode('utf-8')

            decrypted_msg_data = self.code.decrypt_symmetric(encrypted_msg_data, session_key)

            response = await self.process_rpc(decrypted_msg_data, session_key)

            encrypted_response = self.code.encrypt_symmetric(json.dumps(response), session_key)

            writer.write(len(encrypted_response).to_bytes(4, 'big'))
            writer.write(encrypted_response.encode('utf-8'))
            await writer.drain()

    except asyncio.IncompleteReadError:
        # Peer closed the stream mid-frame or between frames: normal disconnect.
        print(f"RPC Server: Connection from {addr} closed.")
    except Exception as e:
        print(f"RPC Server: Error with client {addr}: {e}")
    finally:
        writer.close()
        await writer.wait_closed()
is_function_allowed(module, function, client_identification)

Checks if a function is allowed for a given client identification.

Source code in toolboxv2/mods/P2PRPCServer.py
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
def is_function_allowed(self, module: str, function: str, client_identification: str) -> bool:
    """Return True when ``module.function`` is whitelisted for RPC access.

    The whitelist (``function_access_config``) maps a module name to the
    collection of callable function names.  ``client_identification`` is
    currently not consulted: any authenticated client may invoke any
    whitelisted function.  The config could later be extended to
    per-function client-id lists, e.g.
    ``{"ModuleName": {"function1": ["id1", "id2"]}}``.
    """
    config = self.function_access_config
    if module not in config:
        return False
    # NOTE: client_identification is intentionally ignored for now.
    return function in config[module]
process_rpc(msg_data, session_key) async

Processes a single RPC request and returns a response dictionary.

Source code in toolboxv2/mods/P2PRPCServer.py
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
async def process_rpc(self, msg_data: str, session_key: str) -> dict:
    """Processes a single RPC request and returns a response dictionary.

    Args:
        msg_data: Decrypted JSON payload of a single request frame.
        session_key: Per-connection session key (unused here; encryption
            of the response happens in the caller).

    Returns:
        A JSON-serializable response dict carrying either ``result`` or
        ``error``, following JSON-RPC error-code conventions.
    """
    # BUG FIX: the except-branch previously read `call.get('call_id')`,
    # but `call` is unbound when json.loads itself raises, so a parse
    # error surfaced as a NameError instead of a -32700 response.
    call_id = None
    try:
        call = json.loads(msg_data)
        call_id = call.get('call_id')
        if call.get('type') != 'request':
            raise ValueError("Invalid message type")
    except (json.JSONDecodeError, ValueError) as e:
        return self.format_error(call_id, -32700, f"Parse error: {e}")

    module = call.get('module')
    function = call.get('function')
    args = call.get('args', [])
    kwargs = call.get('kwargs', {})
    client_identification = call.get('identification_part')

    # Whitelist check before dispatching anything.
    if not self.is_function_allowed(module, function, client_identification):
        error_msg = f"Function '{module}.{function}' is not allowed for identification '{client_identification}'."
        print(f"RPC Server: {error_msg}")
        return self.format_error(call_id, -32601, "Method not found or not allowed")

    print(f"RPC Server: Executing '{module}.{function}' for '{client_identification}'")
    try:
        result: Result = await self.app.a_run_any(
            (module, function),
            args_=args,
            kwargs_=kwargs,
            get_results=True
        )

        if result.is_error():
            return self.format_error(call_id, result.info.get('exec_code', -32000), result.info.get('help_text'), result.get())
        else:
            return {
                "type": "response",
                "call_id": call_id,
                "result": result.get(),
                "error": None
            }
    except Exception as e:
        # Execution failures never propagate to the transport loop; they
        # are reported back to the caller as an internal error.
        print(f"RPC Server: Exception during execution of '{module}.{function}': {e}")
        return self.format_error(call_id, -32603, "Internal error during execution", str(e))
start() async

Starts the TCP server.

Source code in toolboxv2/mods/P2PRPCServer.py
150
151
152
153
154
155
156
157
158
async def start(self):
    """Launch the asyncio TCP listener and serve requests until cancelled."""
    server = await asyncio.start_server(self.handle_client, self.host, self.port)
    self.server = server
    addr = server.sockets[0].getsockname()
    print(f"P2P RPC Server listening on {addr}")
    async with server:
        await server.serve_forever()
stop()

Stops the TCP server.

Source code in toolboxv2/mods/P2PRPCServer.py
160
161
162
163
164
def stop(self):
    """Close the listening socket, if a server was ever started."""
    active = self.server
    if not active:
        return
    active.close()
    print("P2P RPC Server stopped.")

start_rpc_server(app, host='127.0.0.1', port=8888, tb_r_key=None, function_access_config=None) async

Starts the P2P RPC server.

Source code in toolboxv2/mods/P2PRPCServer.py
166
167
168
169
170
171
172
173
174
175
176
177
178
@export(mod_name=Name, name="start_server", test=False)
async def start_rpc_server(app: App, host: str = '127.0.0.1', port: int = 8888, tb_r_key: str = None, function_access_config: dict = None):
    """Starts the P2P RPC server.

    The shared secret comes from ``tb_r_key`` or, when omitted, from the
    TB_R_KEY environment variable; a missing key is a hard error.
    """
    key = tb_r_key if tb_r_key is not None else os.getenv("TB_R_KEY")
    if key is None:
        raise ValueError("TB_R_KEY environment variable is not set.")

    rpc_server = P2PRPCServer(app, host, port, key, function_access_config)
    try:
        await rpc_server.start()
    except KeyboardInterrupt:
        rpc_server.stop()

POA

module

ActionManagerEnhanced
Source code in toolboxv2/mods/POA/module.py
 229
 230
 231
 232
 233
 234
 235
 236
 237
 238
 239
 240
 241
 242
 243
 244
 245
 246
 247
 248
 249
 250
 251
 252
 253
 254
 255
 256
 257
 258
 259
 260
 261
 262
 263
 264
 265
 266
 267
 268
 269
 270
 271
 272
 273
 274
 275
 276
 277
 278
 279
 280
 281
 282
 283
 284
 285
 286
 287
 288
 289
 290
 291
 292
 293
 294
 295
 296
 297
 298
 299
 300
 301
 302
 303
 304
 305
 306
 307
 308
 309
 310
 311
 312
 313
 314
 315
 316
 317
 318
 319
 320
 321
 322
 323
 324
 325
 326
 327
 328
 329
 330
 331
 332
 333
 334
 335
 336
 337
 338
 339
 340
 341
 342
 343
 344
 345
 346
 347
 348
 349
 350
 351
 352
 353
 354
 355
 356
 357
 358
 359
 360
 361
 362
 363
 364
 365
 366
 367
 368
 369
 370
 371
 372
 373
 374
 375
 376
 377
 378
 379
 380
 381
 382
 383
 384
 385
 386
 387
 388
 389
 390
 391
 392
 393
 394
 395
 396
 397
 398
 399
 400
 401
 402
 403
 404
 405
 406
 407
 408
 409
 410
 411
 412
 413
 414
 415
 416
 417
 418
 419
 420
 421
 422
 423
 424
 425
 426
 427
 428
 429
 430
 431
 432
 433
 434
 435
 436
 437
 438
 439
 440
 441
 442
 443
 444
 445
 446
 447
 448
 449
 450
 451
 452
 453
 454
 455
 456
 457
 458
 459
 460
 461
 462
 463
 464
 465
 466
 467
 468
 469
 470
 471
 472
 473
 474
 475
 476
 477
 478
 479
 480
 481
 482
 483
 484
 485
 486
 487
 488
 489
 490
 491
 492
 493
 494
 495
 496
 497
 498
 499
 500
 501
 502
 503
 504
 505
 506
 507
 508
 509
 510
 511
 512
 513
 514
 515
 516
 517
 518
 519
 520
 521
 522
 523
 524
 525
 526
 527
 528
 529
 530
 531
 532
 533
 534
 535
 536
 537
 538
 539
 540
 541
 542
 543
 544
 545
 546
 547
 548
 549
 550
 551
 552
 553
 554
 555
 556
 557
 558
 559
 560
 561
 562
 563
 564
 565
 566
 567
 568
 569
 570
 571
 572
 573
 574
 575
 576
 577
 578
 579
 580
 581
 582
 583
 584
 585
 586
 587
 588
 589
 590
 591
 592
 593
 594
 595
 596
 597
 598
 599
 600
 601
 602
 603
 604
 605
 606
 607
 608
 609
 610
 611
 612
 613
 614
 615
 616
 617
 618
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
class ActionManagerEnhanced:
    """Per-user manager for POA action items, history, undo log and settings.

    All state is persisted through the app's "DB" module under
    user-scoped keys; datetimes are stored in UTC and rendered in the
    user's configured timezone.
    """

    # Per-collection DB key prefixes; the user id is appended by _get_db_key.
    DB_ITEMS_PREFIX = "donext_items"
    DB_HISTORY_PREFIX = "donext_history"
    DB_CURRENT_ITEM_PREFIX = "donext_current_item"
    DB_UNDO_LOG_PREFIX = "donext_undo_log"
    DB_SETTINGS_PREFIX = "donext_settings"  # Added for user settings

    def __init__(self, app: App, user_id: str):
        """Bind to the app's DB/isaa modules and load this user's state.

        Settings are intentionally loaded before items: item
        deserialization depends on the user's configured timezone.
        """
        self.app = app
        self.user_id = user_id
        self.db = app.get_mod("DB")  # persistence backend
        self.isaa = app.get_mod("isaa")  # optional AI module used by get_suggestions

        self.settings: UserSettings = UserSettings(user_id=user_id)  # Initialize with defaults
        self.items: list[ActionItem] = []
        self.history: list[HistoryEntry] = []
        self.current_item: ActionItem | None = None
        self.undo_log: list[UndoLogEntry] = []

        self._load_settings()  # Load settings first as they might affect item loading
        self._load_data()

    def _get_db_key(self, prefix: str) -> str:
        """Namespace a DB key prefix with this manager's user id."""
        return "{}_{}".format(prefix, self.user_id)

    def get_user_timezone(self) -> pytz.BaseTzInfo:
        """Resolve the user's configured timezone, falling back to UTC when unknown."""
        tz_name = self.settings.timezone
        try:
            return pytz.timezone(tz_name)
        except pytz.UnknownTimeZoneError:
            return pytz.utc

    def _load_settings(self):
        """Load UserSettings from the DB, saving defaults when absent.

        Any failure falls back to default settings (which are then
        persisted) so the manager always ends up with usable settings.
        """
        settings_key = self._get_db_key(self.DB_SETTINGS_PREFIX)
        try:
            settings_data = self.db.get(settings_key)
            if settings_data.is_data() and settings_data.get():
                # DB results may come back either as a raw JSON string or
                # wrapped in a list - handle both shapes.
                loaded_settings = json.loads(settings_data.get()[0]) if isinstance(settings_data.get(),
                                                                                   list) else json.loads(
                    settings_data.get())
                self.settings = UserSettings.model_validate_json_safe(loaded_settings)
            else:  # Save default settings if not found
                self._save_settings()
        except Exception as e:
            self.app.logger.error(f"Error loading settings for user {self.user_id}: {e}. Using defaults.")
            self.settings = UserSettings(user_id=self.user_id)  # Fallback to defaults
            self._save_settings()  # Attempt to save defaults

    def _save_settings(self):
        """Persist the current settings snapshot; failures are logged, never raised."""
        try:
            payload = json.dumps(self.settings.model_dump_json_safe())
            self.db.set(self._get_db_key(self.DB_SETTINGS_PREFIX), payload)
        except Exception as e:
            self.app.logger.error(f"Error saving settings for user {self.user_id}: {e}")

    def update_user_settings(self, settings_data: dict[str, Any]) -> UserSettings:
        """Merge ``settings_data`` into the current settings and persist.

        The stored user_id always wins over any user_id present in
        ``settings_data``, so a client cannot reassign the settings to a
        different user.  Returns the updated settings.
        """
        # Ensure user_id is not changed by malicious input
        current_user_id = self.settings.user_id
        updated_settings = UserSettings.model_validate(
            {**self.settings.model_dump(), **settings_data, "user_id": current_user_id})
        self.settings = updated_settings
        self._save_settings()
        # Potentially re-process items if timezone change affects interpretations, though this is complex.
        # For now, new items will use the new timezone. Existing UTC times remain.
        self.app.logger.info(f"User {self.user_id} settings updated: Timezone {self.settings.timezone}")
        return self.settings

    def _load_data(self):
        """Load items, history, current item and undo log from the DB.

        Any failure resets every collection to its empty default so the
        manager is always usable; next_due is recalculated for all items
        afterwards.  DB values may arrive as a raw JSON string or wrapped
        in a list - both shapes are handled below.
        """
        items_key = self._get_db_key(self.DB_ITEMS_PREFIX)
        history_key = self._get_db_key(self.DB_HISTORY_PREFIX)
        current_item_key = self._get_db_key(self.DB_CURRENT_ITEM_PREFIX)
        undo_log_key = self._get_db_key(self.DB_UNDO_LOG_PREFIX)
        user_tz_str = self.settings.timezone  # For model_validate_json_safe context

        try:
            items_data = self.db.get(items_key)
            if items_data.is_data() and items_data.get():
                loaded_items_raw = json.loads(items_data.get()[0]) if isinstance(items_data.get(),
                                                                                 list) else json.loads(items_data.get())
                self.items = [ActionItem.model_validate_json_safe(item_dict, user_timezone_str=user_tz_str) for
                              item_dict in loaded_items_raw]

            history_data = self.db.get(history_key)
            if history_data.is_data() and history_data.get():
                loaded_history_raw = json.loads(history_data.get()[0]) if isinstance(history_data.get(),
                                                                                     list) else json.loads(
                    history_data.get())
                self.history = [HistoryEntry.model_validate_json_safe(entry_dict) for entry_dict in loaded_history_raw]

            current_item_data = self.db.get(current_item_key)
            if current_item_data.is_data() and current_item_data.get():
                current_item_dict = json.loads(current_item_data.get()[0]) if isinstance(current_item_data.get(),
                                                                                         list) else json.loads(
                    current_item_data.get())
                if current_item_dict:
                    self.current_item = ActionItem.model_validate_json_safe(current_item_dict,
                                                                            user_timezone_str=user_tz_str)

            undo_log_data = self.db.get(undo_log_key)
            if undo_log_data.is_data() and undo_log_data.get():
                loaded_undo_raw = json.loads(undo_log_data.get()[0]) if isinstance(undo_log_data.get(),
                                                                                   list) else json.loads(
                    undo_log_data.get())
                self.undo_log = [UndoLogEntry.model_validate_json_safe(entry_dict) for entry_dict in loaded_undo_raw]

        except Exception as e:
            self.app.logger.error(f"Error loading data for user {self.user_id}: {e}")
            self.items, self.history, self.current_item, self.undo_log = [], [], None, []
        self._recalculate_next_due_for_all()

    def _save_data(self):
        """Write items, history, current item and undo log to the DB.

        Persistence errors are logged and swallowed so a DB hiccup
        cannot crash the caller.
        """
        try:
            items_json = json.dumps([i.model_dump_json_safe() for i in self.items])
            self.db.set(self._get_db_key(self.DB_ITEMS_PREFIX), items_json)
            history_json = json.dumps([h.model_dump_json_safe() for h in self.history])
            self.db.set(self._get_db_key(self.DB_HISTORY_PREFIX), history_json)
            current_json = json.dumps(self.current_item.model_dump_json_safe() if self.current_item else None)
            self.db.set(self._get_db_key(self.DB_CURRENT_ITEM_PREFIX), current_json)
            undo_json = json.dumps([u.model_dump_json_safe() for u in self.undo_log])
            self.db.set(self._get_db_key(self.DB_UNDO_LOG_PREFIX), undo_json)
        except Exception as e:
            self.app.logger.error(f"Error saving data for user {self.user_id}: {e}")

    def _add_history_entry(self, item: ActionItem, status_override: ActionStatus | None = None,
                           notes: str | None = None):
        """Append a HistoryEntry snapshot of ``item`` to the in-memory history.

        Does not persist by itself; callers in this class invoke
        _save_data() afterwards.
        """
        entry = HistoryEntry(
            item_id=item.id, item_title=item.title, item_type=item.item_type,
            status_changed_to=status_override or item.status,
            parent_id=item.parent_id, notes=notes
        )
        self.history.append(entry)

    def _datetime_to_user_tz(self, dt_utc: datetime | None) -> datetime | None:
        """Convert a stored UTC datetime into the user's local timezone."""
        if not dt_utc:
            return None
        if dt_utc.tzinfo is None:
            # Defensive: stored values should already be UTC-aware.
            dt_utc = pytz.utc.localize(dt_utc)
        return dt_utc.astimezone(self.get_user_timezone())

    def _datetime_from_user_input_str(self, dt_str: str | None) -> datetime | None:
        """Parse an ISO datetime string from the user and normalize it to UTC.

        Naive datetimes are interpreted in the user's configured
        timezone; aware ones are converted directly.  Returns None for
        empty input or unparseable strings (logged as a warning).
        """
        if not dt_str:
            return None
        try:
            parsed = isoparse(dt_str)
            if parsed.tzinfo is None or parsed.tzinfo.utcoffset(parsed) is None:
                # Naive: anchor it in the user's local timezone first.
                return self.get_user_timezone().localize(parsed).astimezone(pytz.utc)
            return parsed.astimezone(pytz.utc)
        except ValueError:
            self.app.logger.warning(f"Could not parse datetime string: {dt_str}")
            return None

    def _recalculate_next_due(self, item: ActionItem):
        """Recompute item.next_due (UTC) after a status or schedule change.

        For a completed recurring task the next occurrence is projected
        from last_completed; when fixed_time is set it pins the time of
        day in the user's timezone.  One-time completed tasks get
        next_due=None.  A not-started task with a fixed_time but no
        next_due inherits fixed_time.  Overdue recurrences are advanced
        until they land in the future, and the item status is reset to
        NOT_STARTED for the next occurrence.
        """
        now_utc = datetime.now(pytz.utc)
        user_tz = self.get_user_timezone()

        if item.status == ActionStatus.COMPLETED and item.item_type == ItemType.TASK:
            if item.frequency and item.frequency != Frequency.ONE_TIME:
                base_time_utc = item.last_completed or now_utc  # last_completed is already UTC

                # If item had a fixed_time, align next_due to that time of day in user's timezone
                if item.fixed_time:
                    original_fixed_time_user_tz = item.fixed_time.astimezone(user_tz)
                    # Start from last_completed (or now if missing) in user's timezone for calculation
                    base_time_user_tz = base_time_utc.astimezone(user_tz)

                    # Ensure base_time_user_tz is at least original_fixed_time_user_tz for alignment
                    # but calculations should project from last completion.
                    # For example, if daily task due 9am was completed at 11am, next one is tomorrow 9am.
                    # If completed at 8am, next one is today 9am (if fixed_time was today 9am) or tomorrow 9am.

                    # Let's use last_completed as the primary anchor for when the *next* cycle starts.
                    # The original fixed_time's time component is used for the *time of day* of the next due.

                    current_anchor_user_tz = base_time_user_tz

                    # Calculate next occurrence based on frequency
                    if item.frequency == Frequency.DAILY:
                        next_due_user_tz_date = (current_anchor_user_tz + timedelta(days=1)).date()
                    elif item.frequency == Frequency.WEEKLY:
                        next_due_user_tz_date = (current_anchor_user_tz + timedelta(weeks=1)).date()
                    elif item.frequency == Frequency.MONTHLY:  # Simplified
                        next_due_user_tz_date = (current_anchor_user_tz + timedelta(days=30)).date()
                    elif item.frequency == Frequency.ANNUALLY:
                        next_due_user_tz_date = (current_anchor_user_tz + timedelta(days=365)).date()
                    else:  # Should not happen for recurring
                        item.next_due = None
                        return

                    # Combine with original time of day
                    next_due_user_tz = datetime.combine(next_due_user_tz_date, original_fixed_time_user_tz.time(),
                                                        tzinfo=user_tz)
                    item.next_due = next_due_user_tz.astimezone(pytz.utc)

                else:  # No original fixed_time, so recur based on current time of completion
                    if item.frequency == Frequency.DAILY:
                        item.next_due = base_time_utc + timedelta(days=1)
                    elif item.frequency == Frequency.WEEKLY:
                        item.next_due = base_time_utc + timedelta(weeks=1)
                    elif item.frequency == Frequency.MONTHLY:
                        item.next_due = base_time_utc + timedelta(days=30)
                    elif item.frequency == Frequency.ANNUALLY:
                        item.next_due = base_time_utc + timedelta(days=365)

                # Advance until future if needed (e.g., completing an overdue recurring task)
                # This loop must operate on user's local time perception of "next day"
                while item.next_due and item.next_due < now_utc:
                    next_due_user = item.next_due.astimezone(user_tz)
                    original_time_comp = next_due_user.time()  # Preserve time of day

                    if item.frequency == Frequency.DAILY:
                        next_due_user_adv = next_due_user + timedelta(days=1)
                    elif item.frequency == Frequency.WEEKLY:
                        next_due_user_adv = next_due_user + timedelta(weeks=1)
                    # For monthly/annually, simple timedelta might shift day of month. Using replace for date part.
                    elif item.frequency == Frequency.MONTHLY:
                        # This simplified logic might need dateutil.relativedelta for accuracy
                        year, month = (next_due_user.year, next_due_user.month + 1) if next_due_user.month < 12 else (
                            next_due_user.year + 1, 1)
                        try:
                            next_due_user_adv = next_due_user.replace(year=year, month=month)
                        except ValueError:  # Handle e.g. trying to set Feb 30
                            import calendar
                            last_day = calendar.monthrange(year, month)[1]
                            next_due_user_adv = next_due_user.replace(year=year, month=month, day=last_day)

                    elif item.frequency == Frequency.ANNUALLY:
                        try:
                            next_due_user_adv = next_due_user.replace(year=next_due_user.year + 1)
                        except ValueError:  # Handle leap day if original was Feb 29
                            next_due_user_adv = next_due_user.replace(year=next_due_user.year + 1,
                                                                      day=28)  # Or March 1st
                    else:
                        break

                    item.next_due = user_tz.localize(
                        datetime.combine(next_due_user_adv.date(), original_time_comp)).astimezone(pytz.utc)

                item.status = ActionStatus.NOT_STARTED  # Reset for next occurrence
            else:  # One-time task
                item.next_due = None
        elif item.status == ActionStatus.NOT_STARTED and item.fixed_time and not item.next_due:
            item.next_due = item.fixed_time  # fixed_time is already UTC

        # If task is not completed, not started, and has a next_due in the past, but also a fixed_time in the future
        # (e.g. recurring task whose current instance was missed, but fixed_time points to a specific time for all instances)
        # ensure next_due is not before fixed_time if fixed_time is relevant for setting.
        # This logic is complex. Current setup: fixed_time is the "template", next_due is the "instance".

    def _recalculate_next_due_for_all(self):
        """Refresh the next_due timestamp of every managed item."""
        for managed_item in self.items:
            self._recalculate_next_due(managed_item)

    def add_item(self, item_data: dict[str, Any], by_ai: bool = False, imported: bool = False) -> ActionItem:
        """Validate ``item_data`` into an ActionItem, register it and persist.

        Args:
            item_data: Raw field dict; datetime strings are interpreted in
                the user's timezone via the '_user_timezone_str' context key.
            by_ai: Marks the item as AI-created and logs the AI action.
            imported: Only affects the wording of the history note.

        Returns:
            The newly created ActionItem.
        """
        item_data['_user_timezone_str'] = self.settings.timezone  # For validation context
        item = ActionItem.model_validate(
            item_data)  # Pydantic handles string->datetime, then model_validator converts to UTC
        item.created_by_ai = by_ai
        item.updated_at = datetime.now(pytz.utc)  # Ensure update

        # Initial next_due for new items if not already set by iCal import logic
        if not item.next_due and item.fixed_time and item.status == ActionStatus.NOT_STARTED:
            item.next_due = item.fixed_time

        self.items.append(item)
        self._add_history_entry(item, status_override=ActionStatus.NOT_STARTED,
                                notes="Item created" + (" by AI" if by_ai else "") + (
                                    " via import" if imported else ""))
        if by_ai:
            self._log_ai_action("ai_create_item", [item.id])

        self._save_data()
        return item

    def get_item_by_id(self, item_id: str) -> ActionItem | None:
        """Linear scan for the item with the given id; None when absent."""
        for candidate in self.items:
            if candidate.id == item_id:
                return candidate
        return None

    def update_item(self, item_id: str, update_data: dict[str, Any], by_ai: bool = False) -> ActionItem | None:
        """Apply ``update_data`` to an existing item and persist the result.

        The merged state is re-validated as a whole; on validation
        failure the update is aborted and None is returned.  AI-driven
        updates snapshot the previous state for the undo log.
        """
        item = self.get_item_by_id(item_id)
        if not item: return None

        previous_data_json = item.model_dump_json() if by_ai else None

        # Pass user timezone for validation context if datetime strings are present
        update_data_with_tz_context = {**update_data, '_user_timezone_str': self.settings.timezone}

        updated_item_dict = item.model_dump()
        updated_item_dict.update(update_data_with_tz_context)

        try:
            # Re-validate the whole model to ensure consistency and proper conversions
            new_item_state = ActionItem.model_validate(updated_item_dict)
            # Preserve original ID and created_at, apply new state
            new_item_state.id = item.id
            new_item_state.created_at = item.created_at
            self.items[self.items.index(item)] = new_item_state
            item = new_item_state
        except Exception as e:
            self.app.logger.error(f"Error validating updated item data: {e}. Update aborted for item {item_id}.")
            return None  # Or raise error

        item.updated_at = datetime.now(pytz.utc)
        item.created_by_ai = by_ai

        self._recalculate_next_due(item)
        self._add_history_entry(item, notes="Item updated" + (" by AI" if by_ai else ""))

        if by_ai:
            self._log_ai_action("ai_modify_item", [item.id],
                                {item.id: previous_data_json} if previous_data_json else None)

        self._save_data()
        return item

    def remove_item(self, item_id: str, record_history: bool = True) -> bool:
        """Delete an item and, recursively, all of its children.

        Optionally records a CANCELLED history entry for each removed
        item.  Returns False when the id is unknown.
        """
        target = self.get_item_by_id(item_id)
        if not target:
            return False

        # Remove children first (depth-first) so no orphans remain.
        for child_id in [c.id for c in self.items if c.parent_id == item_id]:
            self.remove_item(child_id, record_history=record_history)

        self.items = [i for i in self.items if i.id != item_id]
        if self.current_item and self.current_item.id == item_id:
            self.current_item = None

        if record_history:
            self._add_history_entry(target, status_override=ActionStatus.CANCELLED, notes="Item removed")
        self._save_data()
        return True

    def set_current_item(self, item_id: str) -> ActionItem | None:
        """Focus an item; a NOT_STARTED item is bumped to IN_PROGRESS.

        Completed one-time tasks cannot be focused again; returns None
        for those and for unknown ids.
        """
        item = self.get_item_by_id(item_id)
        if not item:
            return None
        finished_one_time = (
            item.status == ActionStatus.COMPLETED
            and item.item_type == ItemType.TASK
            and item.frequency == Frequency.ONE_TIME
        )
        if finished_one_time:
            return None

        self.current_item = item
        if item.status == ActionStatus.NOT_STARTED:
            item.status = ActionStatus.IN_PROGRESS
            item.updated_at = datetime.now(pytz.utc)
            self._add_history_entry(item, notes="Set as current, status to In Progress")
        else:
            self._add_history_entry(item, notes="Set as current")
        self._save_data()
        return item

    def complete_current_item(self) -> ActionItem | None:
        """Mark the focused item completed, reschedule it if recurring, and clear focus."""
        completed = self.current_item
        if not completed:
            return None

        completed.status = ActionStatus.COMPLETED
        completed.last_completed = datetime.now(pytz.utc)
        completed.updated_at = datetime.now(pytz.utc)

        self._recalculate_next_due(completed)
        self._add_history_entry(completed, status_override=ActionStatus.COMPLETED, notes="Marked as completed")

        self.current_item = None  # Clear current item after completion
        self._save_data()
        return completed

    def get_suggestions(self, count: int = 2) -> list[ActionItem]:
        """Suggest up to ``count`` items to focus on next.

        When the isaa module is available, active items (capped at 20,
        sorted by priority/next_due) are sent to the AI, which returns
        item ids; otherwise (or on any AI failure) a heuristic fallback
        is used.
        """
        # Prioritize AI suggestions if ISAA is available
        if self.isaa:
            active_items_for_ai = []
            for item in self.items:
                if item.status != ActionStatus.COMPLETED and item.status != ActionStatus.CANCELLED:
                    # Convert datetimes to user's local timezone string for AI context
                    item_dump = item.model_dump_json_safe()  # This is already UTC ISO
                    # Optionally, convert to user's timezone string if AI is better with local times
                    # For now, UTC ISO is fine.
                    active_items_for_ai.append(item_dump)

            MAX_ITEMS_FOR_CONTEXT = 20
            if len(active_items_for_ai) > MAX_ITEMS_FOR_CONTEXT:
                active_items_for_ai.sort(
                    key=lambda x: (x.get('priority', 3), x.get('next_due') or '9999-12-31T23:59:59Z'))
                active_items_for_ai = active_items_for_ai[:MAX_ITEMS_FOR_CONTEXT]

            now_user_tz_str = datetime.now(self.get_user_timezone()).isoformat()

            prompt = (
                f"User's current time: {now_user_tz_str} (Timezone: {self.settings.timezone}). "
                f"Active items (tasks/notes) are provided below (datetimes are in UTC ISO format). "
                f"Suggest the top {count} item IDs to focus on. Consider priority, due dates (next_due), "
                f"and if a current item is set (current_item_id), its sub-items might be relevant. "
                f"Tasks are generally more actionable. Focus on 'not_started' or 'in_progress'.\n\n"
                f"Active Items (JSON):\n{json.dumps(active_items_for_ai, indent=2)}\n\n"
                f"Current Item ID: {self.current_item.id if self.current_item else 'None'}\n\n"
                f"Return JSON: {{ \"suggested_item_ids\": [\"id1\", \"id2\"] }}."
            )

            class SuggestedIds(BaseModel):
                suggested_item_ids: list[str]

            try:
                # NOTE(review): asyncio.run raises if an event loop is already
                # running - confirm this method is only called from sync code.
                structured_response = asyncio.run(
                    self.isaa.format_class(SuggestedIds, prompt, agent_name="TaskCompletion"))
                if structured_response and isinstance(structured_response, dict):
                    suggested_ids_model = SuggestedIds(**structured_response)
                    ai_suggestions = [self.get_item_by_id(id_str) for id_str in suggested_ids_model.suggested_item_ids
                                      if self.get_item_by_id(id_str)]
                    if ai_suggestions: return ai_suggestions[:count]
            except Exception as e:
                self.app.logger.error(f"Error getting AI suggestions: {e}")

        # Fallback to basic suggestions
        return self._get_basic_suggestions(count)

    def _get_basic_suggestions(self, count: int = 2) -> list[ActionItem]:
        """Heuristic fallback: pick actionable items by due date, then priority.

        When a current item is focused and has actionable sub-items, the
        suggestions are drawn exclusively from those sub-items.
        """
        reference_now = datetime.now(pytz.utc)
        candidates = [
            entry for entry in self.items
            if entry.status in [ActionStatus.NOT_STARTED, ActionStatus.IN_PROGRESS]
        ]

        if self.current_item:
            children = [entry for entry in candidates if entry.parent_id == self.current_item.id]
            soon_cutoff = reference_now + timedelta(hours=2)
            # "Urgent" = due within two hours or priority 1-2.
            has_urgent_child = any(
                (child.next_due and child.next_due < soon_cutoff) or child.priority <= 2
                for child in children
            )
            # Any actionable children (urgent or not) narrow the pool to the
            # focused item's sub-tree; otherwise the general pool is used.
            if has_urgent_child or children:
                candidates = children

        # Earliest due date first (items without one sort last), then priority
        # (lower number = more important).
        far_future = datetime.max.replace(tzinfo=pytz.utc)
        candidates.sort(key=lambda entry: (entry.next_due or far_future, entry.priority))
        return candidates[:count]

    def get_history(self, limit: int = 50) -> list[HistoryEntry]:
        """Return the most recent history entries, newest first, capped at *limit*."""
        newest_first = sorted(self.history, key=lambda entry: entry.timestamp, reverse=True)
        return newest_first[:limit]

    def get_all_items_hierarchy(self) -> dict[str, list[dict[str, Any]]]:
        """Build a parent/child tree of all items as JSON-safe dicts.

        Returns a mapping with a single ``"root"`` key whose value is the list
        of top-level item dicts; nested children live under each dict's
        ``"children"`` key. Datetimes are ISO UTC strings (from
        ``model_dump_json_safe``); the client is expected to localize them
        for display.

        Fix: removed the unused ``processed_ids`` local and the dead initial
        ``hierarchy`` literal that was immediately overwritten.
        """
        # One serialized dict per item, keyed by id, so children can be
        # attached to their parent's dict in place.
        item_map = {item.id: item.model_dump_json_safe() for item in self.items}

        root_items: list[dict[str, Any]] = []
        for item_dict in item_map.values():
            parent_id = item_dict.get("parent_id")
            if parent_id and parent_id in item_map:
                item_map[parent_id].setdefault("children", []).append(item_dict)
            else:
                # Items whose parent is unknown are treated as roots.
                root_items.append(item_dict)

        def sort_level(nodes: list[dict[str, Any]]) -> None:
            # Order siblings by priority (ascending), then creation time.
            nodes.sort(key=lambda x: (x.get('priority', 3), isoparse(x.get('created_at'))))
            for node in nodes:
                if "children" in node:
                    sort_level(node["children"])

        sort_level(root_items)
        return {"root": root_items}

    # --- AI Specific Methods ---
    async def ai_create_item_from_text(self, text: str) -> ActionItem | None:
        """Create an ActionItem by having the AI parse free-form user text.

        Prompts ISAA to extract title, type, priority, an absolute due-date
        string (interpreted in the user's timezone by the ActionItem
        validator) and a simple frequency. Returns the created item, or None
        when ISAA is unavailable or parsing fails.
        """
        if not self.isaa:
            self.app.logger.warning("ISAA module not available for AI item creation.")
            return None

        # Target schema the AI response is validated against.
        class ParsedItemFromText(BaseModel):
            item_type: Literal["task", "note"] = "task"
            title: str
            description: str | None = None
            priority: int | None = Field(default=3, ge=1, le=5)
            due_date_str: str | None = None  # e.g., "tomorrow", "next monday at 5pm", "2024-12-25 17:00"
            frequency_str: str | None = Field(default="one_time",
                                                 description="e.g. 'daily', 'weekly', 'one_time', 'every friday'")

        user_tz = self.get_user_timezone()
        current_time_user_tz_str = datetime.now(user_tz).strftime('%Y-%m-%d %H:%M:%S %Z%z')
        prompt = (
            f"User's current time is {current_time_user_tz_str}. Parse the input into a structured item. "
            f"For due_date_str, interpret relative dates/times based on this current time and output "
            f"a specific date string like 'YYYY-MM-DD HH:MM:SS'. If time is omitted, assume a default like 9 AM. "
            f"If date is omitted but time is given (e.g. 'at 5pm'), assume today if 5pm is future, else tomorrow. "
            f"User input: \"{text}\"\n\n"
            f"Format as JSON for ParsedItemFromText."
        )
        try:
            raw_response = await self.isaa.mini_task_completion(prompt, agent_name="TaskCompletion")
            if not raw_response: self.app.logger.error("AI parsing returned empty."); return None

            # Strip a possible ```json fenced block before parsing.
            json_str = raw_response
            if "```json" in json_str: json_str = json_str.split("```json")[1].split("```")[0].strip()
            parsed_dict = json.loads(json_str)
            parsed_data_model = ParsedItemFromText(**parsed_dict)

            item_constructor_data = {
                "item_type": ItemType(parsed_data_model.item_type),
                "title": parsed_data_model.title,
                "description": parsed_data_model.description,
                "priority": parsed_data_model.priority or 3,
            }

            if parsed_data_model.due_date_str:
                # ISAA is prompted to return YYYY-MM-DD HH:MM:SS.
                # This string is assumed to be in the user's local timezone.
                # The ActionItem model_validator will convert this to UTC.
                item_constructor_data["fixed_time"] = parsed_data_model.due_date_str  # Pass as string

            # Frequency parsing (simplified): substring match on common words;
            # anything unrecognized degrades to a one-time item.
            if parsed_data_model.frequency_str:
                freq_str_lower = parsed_data_model.frequency_str.lower()
                if "daily" in freq_str_lower:
                    item_constructor_data["frequency"] = Frequency.DAILY
                elif "weekly" in freq_str_lower:
                    item_constructor_data["frequency"] = Frequency.WEEKLY
                elif "monthly" in freq_str_lower:
                    item_constructor_data["frequency"] = Frequency.MONTHLY
                elif "annually" in freq_str_lower or "yearly" in freq_str_lower:
                    item_constructor_data["frequency"] = Frequency.ANNUALLY
                else:
                    item_constructor_data["frequency"] = Frequency.ONE_TIME

            return self.add_item(item_constructor_data, by_ai=True)
        except Exception as e:
            # raw_response may not exist if the AI call itself raised; the
            # locals() check avoids an UnboundLocalError while logging.
            self.app.logger.error(
                f"Error creating item with AI: {e}. Raw: {raw_response if 'raw_response' in locals() else 'N/A'}")
            return None

    def _log_ai_action(self, action_type: Literal["ai_create_item", "ai_modify_item", "ical_import"],
                       item_ids: list[str], previous_data_map: dict[str, str] | None = None):
        """Record an undoable AI/import action, keeping only the 20 newest entries.

        Persisting to disk is the caller's responsibility (no _save_data here).
        """
        self.undo_log.append(UndoLogEntry(
            action_type=action_type,
            item_ids=item_ids,
            previous_data_json_map=previous_data_map,
        ))
        # Trim so the undo history stays bounded.
        if len(self.undo_log) > 20:
            self.undo_log = self.undo_log[-20:]

    async def undo_last_ai_action(self) -> bool:  # Also handles iCal import undo
        """Revert the most recent action recorded in the undo log.

        Creations ("ai_create_item", "ical_import") are undone by deleting the
        created items; "ai_modify_item" is undone by restoring each item's
        previously serialized state. Returns True when at least one item was
        reverted (a summary history entry is also written and data is saved);
        otherwise the popped log entry is pushed back and False is returned.
        """
        if not self.undo_log: return False
        last_action = self.undo_log.pop()
        action_undone_count = 0

        if last_action.action_type in ["ai_create_item", "ical_import"]:
            for item_id in last_action.item_ids:
                if self.remove_item(item_id, record_history=False):  # Don't double-log removal for undo
                    action_undone_count += 1
        elif last_action.action_type == "ai_modify_item":
            if last_action.previous_data_json_map:
                for item_id, prev_data_json in last_action.previous_data_json_map.items():
                    try:
                        # Rehydrate the pre-modification snapshot of the item.
                        prev_data = ActionItem.model_validate_json_safe(json.loads(prev_data_json),
                                                                        user_timezone_str=self.settings.timezone)
                        # Replace item in place; keep current_item in sync if
                        # it points at the restored item.
                        found = False
                        for i, item_in_list in enumerate(self.items):
                            if item_in_list.id == item_id:
                                self.items[i] = prev_data
                                if self.current_item and self.current_item.id == item_id:
                                    self.current_item = prev_data
                                found = True
                                break
                        if found:
                            action_undone_count += 1
                        else:
                            self.app.logger.warning(f"Could not find item {item_id} to restore during AI undo.")
                    except Exception as e:
                        self.app.logger.error(f"Error restoring item {item_id} during undo: {e}")
            else:  # Should not happen for modify
                self.app.logger.warning(
                    f"Undo for AI modify action on item(s) {last_action.item_ids} had no previous_data_json_map.")

        if action_undone_count > 0:
            # Create a generic history entry for the undo action
            generic_undo_item_title = f"Related to {len(last_action.item_ids)} item(s)"
            if len(last_action.item_ids) == 1:
                item_for_title = self.get_item_by_id(last_action.item_ids[0])  # Might be None if it was a create undo
                generic_undo_item_title = item_for_title.title if item_for_title else "N/A (Undone Action)"

            self.history.append(HistoryEntry(
                item_id=last_action.item_ids[0],  # Representative item
                item_title=generic_undo_item_title,
                item_type=ItemType.TASK,  # Generic
                status_changed_to=ActionStatus.CANCELLED,  # Generic status for undo
                notes=f"Undid action: {last_action.action_type} for {len(last_action.item_ids)} item(s)."
            ))
            self._save_data()
            return True

        # If nothing was undone, put action back to log
        self.undo_log.append(last_action)
        return False

    # --- iCalendar Methods ---
    def _parse_ical_dt(self, dt_ical: vDatetime | vDate, user_tz: pytz.BaseTzInfo) -> datetime | None:
        """Normalize an icalendar date/datetime property to an aware UTC datetime."""
        if not dt_ical:
            return None
        raw = dt_ical.dt

        # NB: test datetime before date — datetime is a subclass of date.
        if isinstance(raw, datetime):
            if raw.tzinfo is not None:
                return raw.astimezone(pytz.utc)
            # Floating (naive) times are interpreted in the user's timezone,
            # per iCal convention for local times.
            return user_tz.localize(raw).astimezone(pytz.utc)
        if isinstance(raw, date):
            # All-day values become midnight in the user's timezone, then UTC.
            midnight_local = user_tz.localize(datetime.combine(raw, datetime.min.time()))
            return midnight_local.astimezone(pytz.utc)
        return None

    def _map_ical_priority_to_app(self, ical_priority: int | None) -> int:
        if ical_priority is None: return 3  # Default
        if 1 <= ical_priority <= 4: return 1  # High
        if ical_priority == 5: return 3  # Medium
        if 6 <= ical_priority <= 9: return 5  # Low
        return 3  # Default for 0 or other values

    def _map_app_priority_to_ical(self, app_priority: int) -> int:
        if app_priority == 1: return 1  # High
        if app_priority == 2: return 3
        if app_priority == 3: return 5  # Medium
        if app_priority == 4: return 7
        if app_priority == 5: return 9  # Low
        return 0  # No priority

    def _map_rrule_to_frequency(self, rrule_prop: vRecur | None) -> tuple[Frequency, str | None]:
        """Map an iCal RRULE onto an app Frequency plus the original rule text.

        Only plain DAILY/WEEKLY/MONTHLY/YEARLY rules map directly; anything
        more complex is imported as ONE_TIME per occurrence, with the raw
        RRULE string preserved for reference / future handling.
        """
        if not rrule_prop:
            return Frequency.ONE_TIME, None

        rule_dict = rrule_prop.to_dict()
        rule_text = vRecur.from_dict(rule_dict).to_ical().decode('utf-8')

        freq_value = rule_dict.get('FREQ')
        for ical_freq, app_freq in (
            ('DAILY', Frequency.DAILY),
            ('WEEKLY', Frequency.WEEKLY),
            ('MONTHLY', Frequency.MONTHLY),
            ('YEARLY', Frequency.ANNUALLY),
        ):
            if freq_value == ical_freq:
                return app_freq, rule_text

        return Frequency.ONE_TIME, rule_text

    def import_ical_events(self, ical_string: str) -> list[ActionItem]:
        """Import VEVENT components from an iCalendar string as tasks.

        Recurring events are expanded into individual ONE_TIME items for
        occurrences inside the next RECURRING_IMPORT_WINDOW_DAYS days, capped
        at MAX_RECURRING_INSTANCES_TO_IMPORT per event. Non-recurring events
        that have clearly ended are skipped; an existing event with the same
        UID is replaced. Returns the newly created items; parse failures are
        logged and yield an empty list.

        Fixes vs. previous version:
        - ``rrule_obj`` is now built unconditionally; it used to be created
          only when DTSTART was absent from the rule text, leaving it unbound
          (NameError) for rules that embed DTSTART.
        - Text properties use a safe extractor: ``component.get(key, default)``
          returns the plain default *string* when the property is missing,
          which has no ``.to_ical()`` method.
        """
        imported_items: list[ActionItem] = []
        try:
            cal = iCalCalendar.from_ical(ical_string)
            user_tz = self.get_user_timezone()
            now_utc = datetime.now(pytz.utc)
            import_limit_date_utc = now_utc + timedelta(days=RECURRING_IMPORT_WINDOW_DAYS)

            # Avoid expanding the same recurring event twice in one import run.
            processed_uids_for_session = set()

            def component_text(component, key: str, default: str = '') -> str:
                # Only real icalendar properties carry .to_ical(); a missing
                # key would otherwise hand back the plain default str.
                value = component.get(key)
                if value is None:
                    return default
                return value.to_ical().decode('utf-8') if hasattr(value, 'to_ical') else str(value)

            for component in cal.walk():
                if component.name != "VEVENT":
                    continue

                uid_prop = component.get('uid')
                # Generate a UID when the event lacks one.
                uid = uid_prop.to_ical().decode('utf-8') if uid_prop else str(uuid.uuid4())

                summary = component_text(component, 'summary', 'Untitled Event')
                description = component_text(component, 'description')
                location = component_text(component, 'location')
                dtstart_ical = component.get('dtstart')
                dtend_ical = component.get('dtend')  # used to skip already-ended events
                ical_priority_val = component.get('priority')
                ical_priority = int(ical_priority_val.to_ical().decode('utf-8')) if ical_priority_val else None
                rrule_prop = component.get('rrule')  # vRecur object or None

                start_time_utc = self._parse_ical_dt(dtstart_ical, user_tz)
                if not start_time_utc:
                    self.app.logger.warning(f"Skipping event '{summary}' due to missing/invalid DTSTART.")
                    continue

                app_priority = self._map_ical_priority_to_app(ical_priority)

                if rrule_prop:
                    if uid in processed_uids_for_session:  # base recurring event already expanded
                        continue
                    processed_uids_for_session.add(uid)

                    rrule_str = rrule_prop.to_ical().decode('utf-8')
                    # Build the rule with the event's own DTSTART as anchor.
                    try:
                        rrule_obj = rrulestr(rrule_str, dtstart=dtstart_ical.dt)
                    except Exception as e_rr:
                        self.app.logger.error(
                            f"Could not parse RRULE '{rrule_str}' for event '{summary}': {e_rr}")
                        continue

                    # Expand only future occurrences inside the import window,
                    # starting at the event's DTSTART or now, whichever is later.
                    iteration_start_utc = max(now_utc, start_time_utc)
                    occurrences_imported = 0
                    for occ_dt_aware in rrule_obj.between(iteration_start_utc, import_limit_date_utc, inc=True):
                        if occurrences_imported >= MAX_RECURRING_INSTANCES_TO_IMPORT:
                            break

                        # Normalize each occurrence to UTC (dateutil can yield
                        # naive datetimes when DTSTART was naive).
                        occ_utc = (occ_dt_aware.astimezone(pytz.utc)
                                   if occ_dt_aware.tzinfo else pytz.utc.localize(occ_dt_aware))

                        # Instance-specific UID so repeats are deduplicated.
                        instance_uid = f"{uid}-{occ_utc.strftime('%Y%m%dT%H%M%S%Z')}"
                        if any(item.ical_uid == instance_uid for item in self.items):
                            self.app.logger.info(
                                f"Instance {instance_uid} for '{summary}' already exists. Skipping.")
                            continue

                        new_item = self.add_item({
                            "title": summary, "description": description, "location": location,
                            "item_type": ItemType.TASK, "fixed_time": occ_utc,
                            "frequency": Frequency.ONE_TIME,  # each imported instance is one-time here
                            "priority": app_priority, "ical_uid": instance_uid,
                            "status": ActionStatus.NOT_STARTED,
                            "ical_rrule_original": rrule_str  # keep source rule for export/reference
                        }, imported=True)
                        imported_items.append(new_item)
                        occurrences_imported += 1

                else:  # Non-recurring event
                    # Skip events that started over a day ago with no DTEND to
                    # tell us whether they are still running.
                    if start_time_utc < (now_utc - timedelta(days=1)) and not dtend_ical:
                        self.app.logger.info(f"Skipping old non-recurring event '{summary}' (UID: {uid})")
                        continue
                    if dtend_ical:
                        end_time_utc = self._parse_ical_dt(dtend_ical, user_tz)
                        if end_time_utc and end_time_utc < now_utc:  # already ended
                            self.app.logger.info(f"Skipping past event '{summary}' (UID: {uid}) that has ended.")
                            continue

                    existing_item = next((item for item in self.items if item.ical_uid == uid), None)
                    if existing_item:
                        # Simplistic update strategy: drop the old copy, re-add.
                        self.app.logger.info(
                            f"Event with UID {uid} ('{summary}') already exists. Re-importing (simple replace).")
                        self.remove_item(existing_item.id, record_history=False)

                    new_item = self.add_item({
                        "title": summary, "description": description, "location": location,
                        "item_type": ItemType.TASK, "fixed_time": start_time_utc,
                        "frequency": Frequency.ONE_TIME, "priority": app_priority,
                        "ical_uid": uid, "status": ActionStatus.NOT_STARTED
                    }, imported=True)
                    imported_items.append(new_item)

            if imported_items:
                # Imports are undoable as a single batch.
                self._log_ai_action("ical_import", [item.id for item in imported_items])
            self._save_data()  # Ensure all changes are saved
            self.app.logger.info(f"Imported {len(imported_items)} items from iCalendar data.")

        except Exception as e:
            self.app.logger.error(f"Failed to parse iCalendar string: {e}", exc_info=True)
        return imported_items

    def import_ical_from_url(self, url: str) -> list[ActionItem]:
        """Fetch an iCalendar feed over HTTP(S) and import its events.

        Returns the imported items, or an empty list when the fetch or the
        subsequent processing fails (errors are logged, never raised).
        """
        headers = {'User-Agent': 'POA_App/1.0 (+https://yourdomain.com/poa_app_info)'}  # Be a good internet citizen
        try:
            response = requests.get(url, timeout=10, headers=headers)
            response.raise_for_status()  # Raises HTTPError for bad responses (4XX or 5XX)
        except requests.exceptions.RequestException as e:
            self.app.logger.error(f"Error fetching iCalendar from URL {url}: {e}")
            return []
        try:
            return self.import_ical_events(response.text)
        except Exception as e:  # Catch other errors like parsing
            self.app.logger.error(f"Error processing iCalendar from URL {url}: {e}")
            return []

    def import_ical_from_file_content(self, file_content: bytes) -> list[ActionItem]:
        """Decode raw iCalendar file bytes and import the contained events.

        Tries strict UTF-8 first (the iCalendar standard encoding) and falls
        back to latin-1 for legacy files. Returns the imported items, or an
        empty list when decoding/processing fails (errors are logged).
        """
        try:
            # BUGFIX: decode strictly so a genuine UnicodeDecodeError can
            # trigger the latin-1 fallback below; with errors='replace' the
            # fallback branch was unreachable dead code.
            ical_string = file_content.decode('utf-8')
            return self.import_ical_events(ical_string)
        except UnicodeDecodeError as e:
            self.app.logger.error(f"Encoding error reading iCalendar file: {e}. Try ensuring UTF-8 encoding.")
            # latin-1 maps every byte, so the decode itself cannot fail, but
            # guard the whole fallback (including the import) as before.
            try:
                ical_string = file_content.decode('latin-1', errors='replace')
                return self.import_ical_events(ical_string)
            except Exception as e_fallback:
                self.app.logger.error(f"Fallback decoding also failed for iCalendar file: {e_fallback}")
                return []
        except Exception as e:
            self.app.logger.error(f"Error processing iCalendar file content: {e}")
            return []

    def export_to_ical_string(self) -> str:
        """Serialize all scheduled tasks (tasks with a fixed_time) to iCalendar.

        Notes and unscheduled tasks are skipped. Datetimes are exported in
        UTC, a default one-hour DTEND is synthesized (items carry no
        duration), and recurrence prefers the stored original RRULE with a
        simple-frequency fallback.

        Fix: removed an unused ``user_tz = self.get_user_timezone()`` local —
        export is entirely UTC-based.
        """
        cal = iCalCalendar()
        cal.add('prodid', '-//POA App//yourdomain.com//')
        cal.add('version', '2.0')

        for item in self.items:
            if item.item_type == ItemType.TASK and item.fixed_time:
                event = iCalEvent()
                event.add('summary', item.title)

                # Normalize fixed_time to aware UTC for iCal best practice.
                dtstart_utc = item.fixed_time
                if dtstart_utc.tzinfo is None:  # Should not happen if stored correctly
                    dtstart_utc = pytz.utc.localize(dtstart_utc)
                else:
                    dtstart_utc = dtstart_utc.astimezone(pytz.utc)
                event.add('dtstart', dtstart_utc)  # vDatetime handles UTC conversion for .to_ical()

                # Items carry no duration, so assume one hour for DTEND.
                event.add('dtend', dtstart_utc + timedelta(hours=1))

                event.add('dtstamp', datetime.now(pytz.utc))  # Time the event was created in iCal
                event.add('uid', item.ical_uid or item.id)  # Use original iCal UID if present, else our ID

                if item.description:
                    event.add('description', item.description)
                if item.location:
                    event.add('location', item.location)

                event.add('priority', self._map_app_priority_to_ical(item.priority))

                # Handle recurrence
                if item.frequency != Frequency.ONE_TIME:
                    if item.ical_rrule_original:  # If we have the original complex rule, use it
                        try:
                            # vRecur.from_ical requires bytes
                            event.add('rrule', vRecur.from_ical(item.ical_rrule_original.encode()))
                        except Exception as e_rrule:
                            self.app.logger.warning(
                                f"Could not parse stored original RRULE '{item.ical_rrule_original}' for item {item.id}: {e_rrule}. Exporting as simple recurrence.")
                            # Fallback to simple mapping
                            self._add_simple_rrule(event, item.frequency)
                    else:  # Map simple frequency
                        self._add_simple_rrule(event, item.frequency)

                cal.add_component(event)
        return cal.to_ical().decode('utf-8')

    def _add_simple_rrule(self, event: iCalEvent, frequency: Frequency):
        """Attach a plain RRULE to *event* for the simple Frequency values."""
        freq_names = {
            Frequency.DAILY: 'DAILY',
            Frequency.WEEKLY: 'WEEKLY',
            Frequency.MONTHLY: 'MONTHLY',
            Frequency.ANNUALLY: 'YEARLY',
        }
        ical_freq = freq_names.get(frequency)
        # ONE_TIME (and any unmapped value) gets no RRULE at all.
        if ical_freq is not None:
            event.add('rrule', vRecur({'freq': ical_freq}))

SchedulerManager

SchedulerManagerClass

Source code in toolboxv2/mods/SchedulerManager.py
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
class SchedulerManagerClass:
    """Registry and background runner for ``schedule`` jobs.

    Jobs registered through :meth:`register_job` are stored in ``self.jobs``
    and executed by a daemon thread that polls ``schedule.run_pending()``
    once per second.  The last three successful job names and the latest
    error message per job are tracked for inspection.
    """

    def __init__(self):
        self.jobs = {}  # job_id -> record dict built by _save_job
        self.thread = None  # daemon thread created by start()
        self.running = False  # loop flag read by _run()
        self.last_successful_jobs = deque(maxlen=3)  # Stores last 3 successful job names
        self.job_errors = {}  # Stores job names as keys and error messages as values

    def _run(self):
        # Poll loop executed on the daemon thread (1-second granularity).
        while self.running:
            schedule.run_pending()
            time.sleep(1)

    def start(self):
        """Start the polling thread; a no-op if the scheduler already runs."""
        if not self.running:
            self.running = True
            self.thread = threading.Thread(target=self._run, daemon=True)
            self.thread.start()

    def stop(self):
        """Signal the polling loop to stop and join the worker thread."""
        self.running = False
        if self.thread is not None:
            self.thread.join()

    def job_wrapper(self, job_name: str, job_function: callable):
        """
        Wrap a job function to track success and errors.

        Successful runs append *job_name* to ``last_successful_jobs`` and
        clear any stale ``job_errors`` entry; failures store ``str(e)``
        under *job_name* instead of propagating the exception.
        """
        def wrapped_job(*args, **kwargs):
            try:
                job_function(*args, **kwargs)
                # If the job ran successfully, store it in the success queue
                self.last_successful_jobs.append(job_name)
                if job_name in self.job_errors:
                    del self.job_errors[job_name]  # Remove error record if job succeeded after failing
            except Exception as e:
                # Capture any exceptions and store them
                self.job_errors[job_name] = str(e)

        return wrapped_job


    def register_job(self,
                     job_id: str,
                     second: int = -1,
                     func: (Callable or str) | None = None,
                     job: schedule.Job | None = None,
                     time_passer: schedule.Job | None = None,
                     object_name: str | None = None,
                     receive_job: bool = False,
                     save: bool = False,
                     max_live: bool = False,
                     serializer=serializer_default,
                     args=None, kwargs=None):
        """
            Register a job either from an existing schedule.Job or a function.

            Parameters
            ----------
                job_id : str
                    id for the job for management
                second : int
                    The time interval in seconds between each call of the job.
                func : Callable or str
                    The function to be executed as the job.
                job : schedule.Job
                    An existing job object from the schedule library.
                time_passer : schedule.Job
                    A job without a function, used to specify the time interval.
                object_name : str
                    The name of the object contained in the 'func' var to be executed.
                receive_job : bool
                    A flag indicating whether the job should be received from an object from 'func' var.
                save : bool
                    A flag indicating whether the job should be saved.
                max_live : bool
                    A flag indicating whether the job should have a maximum live time.
                serializer : dill
                    json, pickle or dill; must provide a dumps function
                *args, **kwargs : Any serializable and deserializable
                    Additional arguments to be passed to the job function.

            Returns
            -------
                A Result wrapping a status message, or an error Result.
           """

        # Exactly one of `job` / `func` must be supplied.
        if job is None and func is None:
            return Result.default_internal_error("Both job and func are not specified."
                                                 " Please specify either job or func.")
        if job is not None and func is not None:
            return Result.default_internal_error("Both job and func are specified. Please specify either job or func.")

        if job is not None:
            # Placeholder function so _save_job has a 'func' value to record.
            def func(x):
                return x
            return self._save_job(job_id=job_id,
                                  job=job,
                                  save=save,
                                  func=func,
                                  args=args,
                                  kwargs=kwargs,
                                  serializer=serializer)

        parsed_attr = self._parse_function(func=func, object_name=object_name)

        if parsed_attr.is_error():
            parsed_attr.result.data_info = f"Error parsing function for job : {job_id}"
            return parsed_attr

        if receive_job:
            job = parsed_attr.get()
        else:
            func = parsed_attr.get()

        time_passer = self._prepare_time_passer(time_passer=time_passer,
                                                second=second)

        # NOTE(review): _prepare_job_func is declared as
        # (self, func, max_live, second, job_id, *args, **kwargs); the keyword
        # args=... / kwargs=... below are therefore captured inside **kwargs as
        # {'args': args, 'kwargs': kwargs}, so the scheduled call becomes
        # func(args=..., kwargs=...) rather than func(*args, **kwargs) —
        # confirm this is intended.
        job_func = self._prepare_job_func(func=func,
                                          max_live=max_live,
                                          second=second,
                                          args=args,
                                          kwargs=kwargs,
                                          job_id=job_id)

        job = self._get_final_job(job=job,
                                  func=self.job_wrapper(job_id, job_func),
                                  time_passer=time_passer,
                                  job_func=job_func,
                                  args=args,
                                  kwargs=kwargs)
        if job.is_error():
            return job

        job = job.get()

        return self._save_job(job_id=job_id,
                              job=job,
                              save=save,
                              func=func,
                              args=args,
                              kwargs=kwargs,
                              serializer=serializer)

    @staticmethod
    def _parse_function(func: str or Callable, object_name):
        """Resolve *func* into a callable.

        Accepted forms: a ``.py`` file path (executed, then ``object_name``
        looked up), a ``.dill`` file path (unpickled when
        ``safety_mode == 'open'``), a source-code string (exec'd with an
        ``app`` binding), or an already-callable object.
        """
        if isinstance(func, str) and func.endswith('.py'):
            # SECURITY: executes arbitrary file contents — trusted paths only.
            with open(func) as file:
                func_code = file.read()
                exec(func_code)
                func = locals()[object_name]
        elif isinstance(func, str) and func.endswith('.dill') and safety_mode == 'open':
            try:
                with open(func, 'rb') as file:
                    func = dill.load(file)
            except FileNotFoundError:
                return Result.default_internal_error(f"Function file {func} not found or dill not installed")
        elif isinstance(func, str):
            local_vars = {'app': get_app(from_=Name + f".pasing.{object_name}")}
            try:
                # SECURITY: executes an arbitrary code string — trusted input only.
                exec(func.strip(), {}, local_vars)
            except Exception as e:
                return Result.default_internal_error(f"Function parsing failed withe {e}")
            # NOTE(review): raises KeyError (uncaught) if the exec'd code did
            # not define `object_name` — confirm this is the desired failure mode.
            func = local_vars[object_name]
        elif isinstance(func, Callable):
            pass
        else:
            return Result.default_internal_error("Could not parse object scheduler_manager.parse_function")
        return Result.ok(func)

    @staticmethod
    def _prepare_time_passer(time_passer, second):
        """Return *time_passer*, or build one firing every *second* seconds."""
        if time_passer is None and second > 0:
            return schedule.every(second).seconds
        elif time_passer is None and second <= 0:
            raise ValueError("second must be greater than 0")
        return time_passer

    def _prepare_job_func(self, func: Callable, max_live: bool, second: float, job_id: str, *args, **kwargs):
        """Optionally wrap *func* so it cancels itself after *second* seconds.

        NOTE(review): callers in this class pass ``args=``/``kwargs=`` as
        keywords, which land inside ``**kwargs`` here — see the note in
        register_job.
        """
        if max_live:
            end_time = datetime.now() + timedelta(seconds=second)

            def job_func():
                if datetime.now() < end_time:
                    func(*args, **kwargs)
                else:
                    # Lifetime exceeded: cancel the registered schedule job.
                    job = self.jobs.get(job_id, {}).get('job')
                    if job is not None:
                        schedule.cancel_job(job)
                    else:
                        print("Error Canceling job")

            return job_func
        return func

    @staticmethod
    def _get_final_job(job, func, time_passer, job_func, args, kwargs):
        """Schedule *job_func* on *time_passer* unless *job* already exists.

        NOTE(review): *func* (the tracking wrapper built by job_wrapper) is
        only used for the callability check; the unwrapped *job_func* is what
        actually gets scheduled, bypassing success/error tracking — confirm
        this is intended.
        NOTE(review): ``*args`` / ``**kwargs`` raise TypeError when the caller
        passes the default ``None`` values from register_job.
        """
        if job is None and isinstance(func, Callable):
            job = time_passer.do(job_func, *args, **kwargs)
        elif job is not None:
            pass
        else:
            return Result.default_internal_error("No Final job found for register")
        return Result.ok(job)

    def _save_job(self, job_id, job, save, args=None, **kwargs):
        """Record *job* in ``self.jobs`` and return an Ok Result message.

        NOTE(review): extra keywords from callers (func, serializer, kwargs)
        are absorbed into ``**kwargs`` here, and 'func' is stored as the
        job_id string — so the saved record cannot round-trip through
        load_jobs as currently written.
        """
        if job is not None:
            self.jobs[job_id] = {'id': job_id, 'job': job, 'save': save, 'func': job_id, 'args': args,
                                 'kwargs': kwargs}
            f = (f"Added Job {job_id} :{' - saved' if save else ''}"
                  f"{' - args ' + str(len(args)) if args else ''}"
                  f"{' - kwargs ' + str(len(kwargs.keys())) if kwargs else ''}")
            return Result.ok(f)
        else:
            return Result.default_internal_error(job_id)

    def cancel_job(self, job_id):
        """Cancel a scheduled job and mark its record as cancelled/unsaved."""
        if job_id not in self.jobs:
            print("Job not found")
            return
        schedule.cancel_job(self.jobs[job_id].get('job'))
        self.jobs[job_id]["cancelled"] = True
        self.jobs[job_id]["save"] = False
        print("Job cancelled")

    def del_job(self, job_id):
        """Remove a job record, cancelling it first if still active."""
        if job_id not in self.jobs:
            print("Job not found")
            return
        if not self.jobs[job_id].get("cancelled", False):
            print("Job not cancelled canceling job")
            self.cancel_job(job_id)
        del self.jobs[job_id]
        print("Job deleted")

    def save_jobs(self, file_path, serializer=serializer_default):
        """Serialize all job records flagged ``save`` to *file_path*."""
        with open(file_path, 'wb') as file:
            save_jobs = [job for job in self.jobs.values() if job['save']]
            serializer.dump(save_jobs, file)

    def load_jobs(self, file_path, deserializer=deserializer_default):
        """Re-register jobs previously written by :meth:`save_jobs`.

        NOTE(review): ``**job_info`` still contains the 'id' and 'func' keys,
        so register_job receives duplicate keyword arguments (TypeError);
        also _save_job stored 'func' as the job-id string, so
        ``deserializer.loads`` is given a plain string — verify this path
        against an actual save/load round trip.
        """
        with open(file_path, 'rb') as file:
            jobs = deserializer.load(file)
            for job_info in jobs:
                del job_info['job']
                func = deserializer.loads(job_info['func'])
                self.register_job(job_info['id'], func=func, **job_info)

    def get_tasks_table(self):
        """Render the registered jobs as a fixed-width ASCII table."""
        if not self.jobs:
            return "No tasks registered."

        # Calculate the maximum width for each column
        id_width = max(len("Task ID"), max(len(job_id) for job_id in self.jobs))
        next_run_width = len("Next Execution")
        interval_width = len("Interval")

        # Create the header
        header = f"| {'Task ID':<{id_width}} | {'Next Execution':<{next_run_width}} | {'Interval':<{interval_width}} |"
        separator = f"|{'-' * (id_width + 2)}|{'-' * (next_run_width + 2)}|{'-' * (interval_width + 2)}|"

        # Create the table rows
        rows = []
        for job_id, job_info in self.jobs.items():
            job = job_info['job']
            next_run = job.next_run.strftime("%Y-%m-%d %H:%M:%S") if job.next_run else "N/A"
            interval = self._get_interval_str(job)
            row = f"| {job_id:<{id_width}} | {next_run:<{next_run_width}} | {interval:<{interval_width}} |"
            rows.append(row)

        # Combine all parts of the table
        table = "\n".join([header, separator] + rows)
        return table

    def _get_interval_str(self, job):
        """Return a human-readable description of *job*'s run interval."""
        if job.interval == 0:
            return "Once"

        units = [
            (86400, "day"),
            (3600, "hour"),
            (60, "minute"),
            (1, "second")
        ]

        # Pick the largest unit that divides the interval evenly.
        for seconds, unit in units:
            if job.interval % seconds == 0:
                count = job.interval // seconds
                return f"Every {count} {unit}{'s' if count > 1 else ''}"

        return f"Every {job.interval} seconds"
job_wrapper(job_name, job_function)

Wrap a job function to track success and errors.

Source code in toolboxv2/mods/SchedulerManager.py
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
def job_wrapper(self, job_name: str, job_function: callable):
    """Return *job_function* wrapped with success/error bookkeeping.

    A successful run records *job_name* in ``last_successful_jobs`` and
    clears any stale entry in ``job_errors``; a failing run swallows the
    exception and stores its text under *job_name* instead.
    """
    def _tracked(*call_args, **call_kwargs):
        try:
            job_function(*call_args, **call_kwargs)
        except Exception as exc:
            # Remember the failure so it can be inspected later.
            self.job_errors[job_name] = str(exc)
        else:
            self.last_successful_jobs.append(job_name)
            # A fresh success supersedes any previously recorded failure.
            self.job_errors.pop(job_name, None)

    return _tracked
register_job(job_id, second=-1, func=None, job=None, time_passer=None, object_name=None, receive_job=False, save=False, max_live=False, serializer=serializer_default, args=None, kwargs=None)
Parameters
job_id : str
    id for the job for management
second : int
    The time interval in seconds between each call of the job.
func : Callable or str
    The function to be executed as the job.
job : schedule.Job
    An existing job object from the schedule library.
time_passer : schedule.Job
    A job without a function, used to specify the time interval.
object_name : str
    The name of the object containing in the 'func' var to be executed.
receive_job : bool
    A flag indicating whether the job should be received from an object from 'func' var.
save : bool
    A flag indicating whether the job should be saved.
max_live : bool
    A flag indicating whether the job should have a maximum live time.
serializer : dill
    json, pickle, or dill — the serializer must provide a dumps function
*args, **kwargs : Any serializable and deserializable
    Additional arguments to be passed to the job function.
Returns
Source code in toolboxv2/mods/SchedulerManager.py
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
def register_job(self,
                 job_id: str,
                 second: int = -1,
                 func: (Callable or str) | None = None,
                 job: schedule.Job | None = None,
                 time_passer: schedule.Job | None = None,
                 object_name: str | None = None,
                 receive_job: bool = False,
                 save: bool = False,
                 max_live: bool = False,
                 serializer=serializer_default,
                 args=None, kwargs=None):
    """
        Register a job either from an existing schedule.Job or a function.

        Parameters
        ----------
            job_id : str
                id for the job for management
            second : int
                The time interval in seconds between each call of the job.
            func : Callable or str
                The function to be executed as the job.
            job : schedule.Job
                An existing job object from the schedule library.
            time_passer : schedule.Job
                A job without a function, used to specify the time interval.
            object_name : str
                The name of the object contained in the 'func' var to be executed.
            receive_job : bool
                A flag indicating whether the job should be received from an object from 'func' var.
            save : bool
                A flag indicating whether the job should be saved.
            max_live : bool
                A flag indicating whether the job should have a maximum live time.
            serializer : dill
                json, pickle or dill; must provide a dumps function
            *args, **kwargs : Any serializable and deserializable
                Additional arguments to be passed to the job function.

        Returns
        -------
            A Result wrapping a status message, or an error Result.
       """

    # Exactly one of `job` / `func` must be supplied.
    if job is None and func is None:
        return Result.default_internal_error("Both job and func are not specified."
                                             " Please specify either job or func.")
    if job is not None and func is not None:
        return Result.default_internal_error("Both job and func are specified. Please specify either job or func.")

    if job is not None:
        # Placeholder function so _save_job has a 'func' value to record.
        def func(x):
            return x
        return self._save_job(job_id=job_id,
                              job=job,
                              save=save,
                              func=func,
                              args=args,
                              kwargs=kwargs,
                              serializer=serializer)

    parsed_attr = self._parse_function(func=func, object_name=object_name)

    if parsed_attr.is_error():
        parsed_attr.result.data_info = f"Error parsing function for job : {job_id}"
        return parsed_attr

    if receive_job:
        job = parsed_attr.get()
    else:
        func = parsed_attr.get()

    time_passer = self._prepare_time_passer(time_passer=time_passer,
                                            second=second)

    # NOTE(review): _prepare_job_func is declared with *args/**kwargs, so the
    # keyword args=... / kwargs=... below are captured inside **kwargs as
    # {'args': args, 'kwargs': kwargs} and the scheduled function ends up
    # called as func(args=..., kwargs=...) — confirm this is intended.
    job_func = self._prepare_job_func(func=func,
                                      max_live=max_live,
                                      second=second,
                                      args=args,
                                      kwargs=kwargs,
                                      job_id=job_id)

    job = self._get_final_job(job=job,
                              func=self.job_wrapper(job_id, job_func),
                              time_passer=time_passer,
                              job_func=job_func,
                              args=args,
                              kwargs=kwargs)
    if job.is_error():
        return job

    job = job.get()

    return self._save_job(job_id=job_id,
                          job=job,
                          save=save,
                          func=func,
                          args=args,
                          kwargs=kwargs,
                          serializer=serializer)

Tools

Bases: MainTool, SchedulerManagerClass

Source code in toolboxv2/mods/SchedulerManager.py
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
class Tools(MainTool, SchedulerManagerClass):
    """toolboxv2 module wrapper exposing the scheduler as exported tools.

    Combines MainTool (module plumbing) with SchedulerManagerClass and
    exports init/start/stop/cancel/delete/add operations under the module
    Name.  Jobs flagged for saving are persisted to
    ``<data_dir>/jobs.compact`` on exit and reloaded on init.
    """

    version = version

    def __init__(self, app=None):
        self.name = Name
        self.color = "VIOLET2"

        self.keys = {"mode": "db~mode~~:"}
        self.encoding = 'utf-8'
        self.tools = {'name': Name}

        # Initialize scheduler state first, then the MainTool plumbing
        # (which wires init_sm / on_exit as the load / exit hooks).
        SchedulerManagerClass.__init__(self)
        MainTool.__init__(self,
                          load=self.init_sm,
                          v=self.version,
                          name=self.name,
                          color=self.color,
                          on_exit=self.on_exit)


    @export(
        mod_name=Name,
        name="Version",
        version=version,
    )
    def get_version(self):
        """Return this module's version string."""
        return self.version

    # Export the scheduler instance for use in other modules
    @export(mod_name=Name, name='init', version=version, initial=True)
    def init_sm(self):
        """Load persisted jobs (if any) and start the scheduler thread."""
        if os.path.exists(self.app.data_dir + '/jobs.compact'):
            print("SchedulerManager try loading from file")
            self.load_jobs(
                self.app.data_dir + '/jobs.compact'
            )
            print("SchedulerManager Successfully loaded")
        print("STARTING SchedulerManager")
        self.start()

    # NOTE(review): exported name 'clos_manager' looks like a typo for
    # 'close_manager'; renaming would break existing callers, so it stays.
    @export(mod_name=Name, name='clos_manager', version=version, exit_f=True)
    def on_exit(self):
        """Stop the scheduler and persist savable jobs to disk."""
        self.stop()
        self.save_jobs(self.app.data_dir + '/jobs.compact')
        return f"saved {len(self.jobs.keys())} jobs in {self.app.data_dir + '/jobs.compact'}"

    @export(mod_name=Name, name='instance', version=version)
    def get_instance(self):
        """Return this Tools instance (scheduler included)."""
        return self

    @export(mod_name=Name, name='start', version=version)
    def start_instance(self):
        """Start the background scheduler thread."""
        return self.start()

    @export(mod_name=Name, name='stop', version=version)
    def stop_instance(self):
        """Stop the background scheduler thread."""
        return self.stop()

    @export(mod_name=Name, name='cancel', version=version)
    def cancel_instance(self, job_id):
        """Cancel the scheduled job with the given id."""
        return self.cancel_job(job_id)

    @export(mod_name=Name, name='dealt', version=version)
    def dealt_instance(self, job_id):
        """Delete the job record with the given id (cancelling it first)."""
        return self.del_job(job_id)

    @export(mod_name=Name, name='add', version=version)
    def register_instance(self, job_data: dict):
        """
        Register a job from a plain dict of register_job parameters.

        example dicts :
            -----------
            {
                "job_id": "job0",
                "second": 0,
                "func": None,
                "job": None,
                "time_passer": None,
                "object_name": "tb_job_fuction",
                "receive_job": False,
                "save": False,
                "max_live": True,
                # just leave it out: "serializer": serializer_default,
                "args": [],
                "kwargs": {},
            }

            job_id : str
                id for the job for management
            second (optional): int
                The time interval in seconds between each call of the job.
            func (optional): Callable or str
                The function to be executed as the job.
            job (optional):  schedule.Job
                An existing job object from the schedule library.
            time_passer (optional):  schedule.Job
                A job without a function, used to specify the time interval.
            object_name (optional): str
                The name of the object contained in the 'func' var to be executed.
            receive_job (optional): bool
                A flag indicating whether the job should be received from an object from 'func' var.
            save (optional): bool
                A flag indicating whether the job should be saved.
            max_live (optional): bool
                A flag indicating whether the job should have a maximum live time.
            serializer (optional):
                json, pickle or dill; must provide a dumps function
            *args, **kwargs (optional):
                Additional arguments to be passed to the job function.

        Parameters
            ----------
           job_data : dict
        """
        if job_data is None:
            self.app.logger.error("No job data provided")
            return None
        job_id = job_data["job_id"]
        second = job_data.get("second", 0)
        func = job_data.get("func")
        job = job_data.get("job")
        time_passer = job_data.get("time_passer")
        object_name = job_data.get("object_name", "tb_job_fuction")
        receive_job = job_data.get("receive_job", False)
        save = job_data.get("save", False)
        max_live = job_data.get("max_live", True)
        serializer = job_data.get("serializer", serializer_default)
        args = job_data.get("args", ())
        kwargs = job_data.get("kwargs", {})

        # Delegate to the scheduler's canonical registration entry point.
        return self.register_job(
            job_id=job_id,
            second=second,
            func=func,
            job=job,
            time_passer=time_passer,
            object_name=object_name,
            receive_job=receive_job,
            save=save,
            max_live=max_live,
            serializer=serializer,
            args=args,
            kwargs=kwargs
        )
register_instance(job_data)
example dicts

{ "job_id": "job0", "second": 0, "func": None, "job": None, "time_passer": None, "object_name": "tb_job_fuction", "receive_job": False, "save": False, "max_live": True, # just lev it out "serializer": serializer_default, "args": [], "kwargs": {}, }

job_id : str id for the job for management second (optional): int The time interval in seconds between each call of the job. func (optional): Callable or str The function to be executed as the job. job (optional): schedule.Job An existing job object from the schedule library. time_passer (optional): schedule.Job A job without a function, used to specify the time interval. object_name (optional): str The name of the object contained in the 'func' var to be executed. receive_job (optional): bool A flag indicating whether the job should be received from an object from 'func' var. save (optional): bool A flag indicating whether the job should be saved. max_live (optional): bool A flag indicating whether the job should have a maximum live time. serializer (optional): json, pickle or dill — must provide a dumps function. *args, **kwargs (optional): Additional arguments to be passed to the job function.

Parameters ---------- job_data : dict

example usage ---------- `python

`
Source code in toolboxv2/mods/SchedulerManager.py
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
@export(mod_name=Name, name='add', version=version)
def register_instance(self, job_data: dict):
    """Register a job described by a plain dict of register_job parameters.

    Expected keys (all optional except ``job_id``): ``second`` (int,
    default 0), ``func`` (Callable or str), ``job`` (schedule.Job),
    ``time_passer`` (schedule.Job), ``object_name`` (str, default
    "tb_job_fuction"), ``receive_job`` (bool, default False), ``save``
    (bool, default False), ``max_live`` (bool, default True),
    ``serializer`` (default ``serializer_default``), ``args`` and
    ``kwargs`` for the job function.

    Returns the result of :meth:`register_job`, or ``None`` (after
    logging an error) when *job_data* is ``None``.  A missing ``job_id``
    raises ``KeyError``.
    """
    if job_data is None:
        self.app.logger.error("No job data provided")
        return None

    # Unpack with the documented defaults and forward to register_job.
    return self.register_job(
        job_id=job_data["job_id"],
        second=job_data.get("second", 0),
        func=job_data.get("func"),
        job=job_data.get("job"),
        time_passer=job_data.get("time_passer"),
        object_name=job_data.get("object_name", "tb_job_fuction"),
        receive_job=job_data.get("receive_job", False),
        save=job_data.get("save", False),
        max_live=job_data.get("max_live", True),
        serializer=job_data.get("serializer", serializer_default),
        args=job_data.get("args", ()),
        kwargs=job_data.get("kwargs", {})
    )

SocketManager

The SocketManager supports two types of connections: 1. Client–Server, 2. Peer-to-Peer.

TruthSeeker

arXivCrawler

ArXiv Crawler for TruthSeeker. Main module for processing research queries.

ArXivPDFProcessor

Main processor for research queries. This is a wrapper around the new ResearchProcessor for backward compatibility.

Source code in toolboxv2/mods/TruthSeeker/arXivCrawler.py
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
class ArXivPDFProcessor:
    """
    Main processor for research queries.
    This is a wrapper around the new ResearchProcessor for backward compatibility:
    all heavy lifting (search, download, chunking, indexing, insight queries)
    is delegated, while the legacy attribute surface and the old ``Paper``
    result type are preserved for existing callers.
    """
    def __init__(self,
                 query: str,
                 tools,
                 chunk_size: int = 1_000_000,
                 overlap: int = 2_000,
                 max_workers=None,
                 num_search_result_per_query=6,
                 max_search=6,
                 download_dir="pdfs",
                 callback=None,
                 num_workers=None):
        """Initialize the ArXiv PDF processor.

        Args:
            query: Research query
            tools: Tools module
            chunk_size: Size of text chunks for processing
            overlap: Overlap between chunks
            max_workers: Maximum number of worker threads
            num_search_result_per_query: Number of search results per query
            max_search: Maximum number of search queries
            download_dir: Directory to save downloaded files
            callback: Callback function for status updates
            num_workers: Number of worker threads
        """
        # Create the new research processor that performs the actual work.
        self.processor = ResearchProcessor(
            query=query,
            tools=tools,
            chunk_size=chunk_size,
            overlap=overlap,
            max_workers=max_workers,
            num_search_result_per_query=num_search_result_per_query,
            max_search=max_search,
            download_dir=download_dir,
            callback=callback,
            num_workers=num_workers
        )

        # Mirror legacy attributes for backward compatibility with older call sites.
        self.insights_generated = False
        self.queries_generated = False
        self.query = query
        self.tools = tools
        self.mem = tools.get_memory()
        self.chunk_size = chunk_size
        self.overlap = overlap
        self.max_workers = max_workers
        self.nsrpq = num_search_result_per_query  # legacy abbreviation
        self.max_search = max_search
        self.download_dir = download_dir
        self.parser = RobustPDFDownloader(download_dir=download_dir)
        # Default to a no-op so send_status() may invoke the callback unconditionally.
        self.callback = callback if callback is not None else lambda status: None
        self.mem_name = None
        self.current_session = None
        self.all_ref_papers = 0
        self.last_insights_list = None
        self.all_texts_len = 0
        self.f_texts_len = 0
        self.s_id = str(uuid.uuid4())  # per-instance session id
        self.semantic_model = self.processor.semantic_model
        self._query_progress = {}
        self._progress_lock = threading.Lock()
        self.num_workers = self.processor.num_workers

    def _update_global_progress(self) -> float:
        """Calculate overall progress considering all processing phases.

        Delegates to the wrapped ResearchProcessor.
        """
        return self.processor._update_global_progress()

    async def search_and_process_papers(self, queries: list[str]) -> list[Paper]:
        """Search for and process papers based on queries.

        Args:
            queries: List of search queries

        Returns:
            List of processed papers, converted to the legacy ``Paper`` type.
            Only results with ``source == "arxiv"`` are returned; other
            sources cannot be represented by the legacy type and are dropped.
        """
        # Use the new processor to search and process papers.
        unified_papers = await self.processor.search_and_process_papers(queries)

        # Convert UnifiedPaper objects to Paper objects for backward compatibility.
        papers = []
        for paper in unified_papers:
            if paper.source == "arxiv":
                arxiv_paper = Paper(
                    title=paper.title,
                    authors=paper.authors,
                    summary=paper.summary,
                    url=paper.url,
                    pdf_url=paper.pdf_url,
                    published=paper.published,
                    updated=paper.source_specific_data.get("updated", ""),
                    categories=paper.source_specific_data.get("categories", []),
                    paper_id=paper.paper_id
                )
                papers.append(arxiv_paper)

        # Mirror the processor's counters for backward compatibility.
        self.all_ref_papers = self.processor.all_ref_papers
        self.all_texts_len = self.processor.all_texts_len
        self.f_texts_len = self.processor.f_texts_len

        return papers

    def send_status(self, step: str, progress: float = None, additional_info: str = ""):
        """Send status update via callback.

        When *progress* is None, the overall progress is computed from the
        wrapped processor.
        """
        if progress is None:
            progress = self._update_global_progress()
        self.callback({
            "step": step,
            "progress": progress,
            "info": additional_info
        })

    def generate_queries(self) -> list[str]:
        """Generate up to ``self.max_search`` ArXiv search queries for ``self.query``.

        Falls back to the raw user query alone if LLM query generation fails.
        """
        self.send_status("Generating search queries")
        self.queries_generated = False

        class ArXivQueries(BaseModel):
            queries: list[str] = Field(..., description="List of ArXiv search queries (en)")

        try:
            # NOTE(review): despite the annotation, format_class appears to
            # return a dict-like object (indexed with ["queries"] below).
            query_generator: ArXivQueries = self.tools.format_class(
                ArXivQueries,
                f"Generate a list of precise ArXiv search queries to comprehensively address: {self.query}"
            )
            queries = [self.query] + query_generator["queries"]
        except Exception:
            self.send_status("Error generating queries", additional_info="Using default query.")
            queries = [self.query]

        if len(queries[:self.max_search]) > 0:
            self.queries_generated = True
        return queries[:self.max_search]

    def init_process_papers(self):
        """Create the named memory store used to index paper content."""
        self.mem.create_memory(self.mem_name, model_config={"model_name": "anthropic/claude-3-5-haiku-20241022"})
        self.send_status("Memory initialized")


    async def generate_insights(self, queries) -> dict:
        """Query the built memory for insights on ``self.query``.

        Args:
            queries: Generated search queries (currently unused here; kept
                for interface stability).

        Returns:
            The raw memory-query result.
        """
        self.send_status("Generating insights")
        query = self.query
        results = await self.mem.query(query=query, memory_names=self.mem_name, unified_retrieve=True, query_params={
            "max_sentences": 25})

        self.insights_generated = True
        self.send_status("Insights generated", progress=1.0)
        return results

    async def extra_query(self, query, query_params=None, unified_retrieve=True):
        """Run a follow-up query against the already-built memory."""
        self.send_status("Processing follow-up query", progress=0.5)
        results = await self.mem.query(query=query, memory_names=self.mem_name,
                                       query_params=query_params, unified_retrieve=unified_retrieve)
        self.send_status("Processing follow-up query Done", progress=1)
        return results

    def generate_mem_name(self):
        """Derive a short memory name from the user query via the LLM agent,
        falling back to the first three words of the query."""
        class UniqueMemoryName(BaseModel):
            """unique memory name based on the user query"""
            name: str
        return self.tools.get_agent("thinkm").format_class(UniqueMemoryName, self.query).get('name', '_'.join(self.query.split(" ")[:3]))

    def initialize(self, session_id, second=False):
        """Reset per-run flags and (on the first run) create the backing memory.

        Args:
            session_id: Identifier stored as the current session.
            second: When True, skip memory-name generation and memory
                creation (follow-up runs reuse the existing memory).
        """
        self.current_session = session_id
        self.insights_generated = False
        self.queries_generated = False
        if second:
            return
        self.mem_name = self.generate_mem_name().strip().replace("\n", '') + '_' + session_id
        self.init_process_papers()

    async def process(self, query=None) -> tuple[list[Paper], dict]:
        """Run the full research pipeline and return ``(papers, insights)``.

        Args:
            query: Optional replacement query; when given, the memory built
                by the first run is reused (no re-initialization).
        """
        if query is not None:
            self.query = query
        self.send_status("Starting research process")
        t0 = time.perf_counter()
        self.initialize(self.s_id, query is not None)

        queries = self.generate_queries()

        papers = await self.search_and_process_papers(queries)

        if len(papers) == 0:
            # No results: retry once with an LLM-cleaned version of the query.
            class UserQuery(BaseModel):
                """Fix all typos and clear the original user query"""
                new_query: str
            self.query = self.tools.format_class(
                UserQuery,
                self.query
            )["new_query"]
            queries = self.generate_queries()
            papers = await self.search_and_process_papers(queries)

        insights = await self.generate_insights(queries)

        elapsed_time = time.perf_counter() - t0
        self.send_status("Process complete", progress=1.0,
                         additional_info=f"Total time: {elapsed_time:.2f}s, Papers analyzed: {len(papers)}/{self.all_ref_papers}")

        return papers, insights

    @staticmethod
    def estimate_processing_metrics(query_length: int, **config) -> (float, float):
        """Return estimated time (seconds) and price for processing.

        Args:
            query_length: Length of the user query in characters (currently
                unused by the calculation; kept for interface stability).
            **config: Must contain 'max_search', 'num_search_result_per_query',
                'chunk_size' and 'overlap'; may contain 'num_workers'.

        Returns:
            Tuple ``(estimated_time, estimated_price)``, rounded and clamped
            to minimums of 10 seconds and 0.04 respectively.
        """
        total_papers = config['max_search'] * config['num_search_result_per_query']
        median_text_length = 100000  # 10 pages * 10000 characters

        # Estimated chunks to process (~45% of chunks are assumed processed).
        total_chunks = total_papers * (median_text_length / config['chunk_size']) + 1 / config['overlap']
        processed_chunks = total_chunks * 0.45
        total_chars = TextSplitter(config['chunk_size'],
                                   config['overlap']
                                   ).approximate(config['chunk_size'] * processed_chunks)
        # NOTE(review): due to the parenthesization, a None 'num_workers'
        # yields w == 16 / 10 == 1.6, not 16 — confirm whether the intent
        # was (num_workers or 16) / 10.
        w = (config.get('num_workers', 16) if config.get('num_workers', 16) is not None else 16 / 10)
        # Processing time + insights generation + insights query + indexing
        # time + download time, divided by workers, plus query-generation,
        # UI, and DB-init overheads.
        estimated_time = ((8+total_papers*0.012)+(total_chunks/20000) * .005 + (total_chunks/2) * .0003 + total_papers * 2.8 ) / w + (0.25 * config['max_search']) + 6 + 4

        price_per_char = 0.0000012525
        price_per_t_chunk = total_chars * price_per_char
        estimated_price = price_per_t_chunk ** 1.7

        # Clamp both estimates to sane minimums.
        if estimated_time < 10:
            estimated_time = 10
        if estimated_price < .04:
            estimated_price = .04
        return round(estimated_time, 2), round(estimated_price, 4)
__init__(query, tools, chunk_size=1000000, overlap=2000, max_workers=None, num_search_result_per_query=6, max_search=6, download_dir='pdfs', callback=None, num_workers=None)

Initialize the ArXiv PDF processor.

Parameters:

Name Type Description Default
query str

Research query

required
tools

Tools module

required
chunk_size int

Size of text chunks for processing

1000000
overlap int

Overlap between chunks

2000
max_workers

Maximum number of worker threads

None
num_search_result_per_query

Number of search results per query

6
max_search

Maximum number of search queries

6
download_dir

Directory to save downloaded files

'pdfs'
callback

Callback function for status updates

None
num_workers

Number of worker threads

None
Source code in toolboxv2/mods/TruthSeeker/arXivCrawler.py
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
def __init__(self,
             query: str,
             tools,
             chunk_size: int = 1_000_000,
             overlap: int = 2_000,
             max_workers=None,
             num_search_result_per_query=6,
             max_search=6,
             download_dir="pdfs",
             callback=None,
             num_workers=None):
    """Initialize the ArXiv PDF processor.

    Args:
        query: Research query
        tools: Tools module
        chunk_size: Size of text chunks for processing
        overlap: Overlap between chunks
        max_workers: Maximum number of worker threads
        num_search_result_per_query: Number of search results per query
        max_search: Maximum number of search queries
        download_dir: Directory to save downloaded files
        callback: Callback function for status updates
        num_workers: Number of worker threads
    """
    # Create the new research processor that performs the actual work;
    # this class is only a backward-compatibility facade over it.
    self.processor = ResearchProcessor(
        query=query,
        tools=tools,
        chunk_size=chunk_size,
        overlap=overlap,
        max_workers=max_workers,
        num_search_result_per_query=num_search_result_per_query,
        max_search=max_search,
        download_dir=download_dir,
        callback=callback,
        num_workers=num_workers
    )

    # Copy attributes for backward compatibility
    self.insights_generated = False
    self.queries_generated = False
    self.query = query
    self.tools = tools
    self.mem = tools.get_memory()
    self.chunk_size = chunk_size
    self.overlap = overlap
    self.max_workers = max_workers
    self.nsrpq = num_search_result_per_query  # legacy abbreviation
    self.max_search = max_search
    self.download_dir = download_dir
    self.parser = RobustPDFDownloader(download_dir=download_dir)
    # Default to a no-op so the callback can be invoked unconditionally.
    self.callback = callback if callback is not None else lambda status: None
    self.mem_name = None
    self.current_session = None
    self.all_ref_papers = 0
    self.last_insights_list = None
    self.all_texts_len = 0
    self.f_texts_len = 0
    self.s_id = str(uuid.uuid4())  # per-instance session id
    self.semantic_model = self.processor.semantic_model
    self._query_progress = {}
    self._progress_lock = threading.Lock()
    self.num_workers = self.processor.num_workers
estimate_processing_metrics(query_length, **config) staticmethod

Return estimated time (seconds) and price for processing.

Source code in toolboxv2/mods/TruthSeeker/arXivCrawler.py
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
@staticmethod
def estimate_processing_metrics(query_length: int, **config) -> (float, float):
    """Return estimated time (seconds) and price for processing.

    Args:
        query_length: Length of the user query in characters (currently
            unused by the calculation; kept for interface stability).
        **config: Must contain 'max_search', 'num_search_result_per_query',
            'chunk_size' and 'overlap'; may contain 'num_workers'.

    Returns:
        Tuple ``(estimated_time, estimated_price)``, rounded and clamped
        to minimums of 10 seconds and 0.04 respectively.
    """
    total_papers = config['max_search'] * config['num_search_result_per_query']
    median_text_length = 100000  # 10 pages * 10000 characters

    # Estimated chunks to process (~45% of chunks are assumed processed).
    total_chunks = total_papers * (median_text_length / config['chunk_size']) + 1 / config['overlap']
    processed_chunks = total_chunks * 0.45
    total_chars = TextSplitter(config['chunk_size'],
                               config['overlap']
                               ).approximate(config['chunk_size'] * processed_chunks)
    # NOTE(review): due to the parenthesization, a None 'num_workers' yields
    # w == 16 / 10 == 1.6, not 16 — confirm whether the intent was
    # (num_workers or 16) / 10.
    w = (config.get('num_workers', 16) if config.get('num_workers', 16) is not None else 16 / 10)
    # Processing time + insights generation + insights query + indexing time
    # + download time, divided by workers, plus query-generation, UI, and
    # DB-init overheads.
    estimated_time = ((8+total_papers*0.012)+(total_chunks/20000) * .005 + (total_chunks/2) * .0003 + total_papers * 2.8 ) / w + (0.25 * config['max_search']) + 6 + 4

    price_per_char = 0.0000012525
    price_per_t_chunk = total_chars * price_per_char
    estimated_price = price_per_t_chunk ** 1.7

    # Clamp both estimates to sane minimums.
    if estimated_time < 10:
        estimated_time = 10
    if estimated_price < .04:
        estimated_price = .04
    return round(estimated_time, 2), round(estimated_price, 4)
search_and_process_papers(queries) async

Search for and process papers based on queries.

Parameters:

Name Type Description Default
queries list[str]

List of search queries

required

Returns:

Type Description
list[Paper]

List of processed papers

Source code in toolboxv2/mods/TruthSeeker/arXivCrawler.py
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
async def search_and_process_papers(self, queries: list[str]) -> list[Paper]:
    """Search for and process papers based on queries.

    Args:
        queries: List of search queries

    Returns:
        List of processed papers, converted to the legacy ``Paper`` type.
        Only results whose source is "arxiv" are kept.
    """
    # Delegate the actual search/processing to the new implementation.
    unified_papers = await self.processor.search_and_process_papers(queries)

    # Translate UnifiedPaper results into the old Paper shape.
    legacy_papers = []
    for unified in unified_papers:
        if unified.source != "arxiv":
            continue
        extra = unified.source_specific_data
        legacy_papers.append(Paper(
            title=unified.title,
            authors=unified.authors,
            summary=unified.summary,
            url=unified.url,
            pdf_url=unified.pdf_url,
            published=unified.published,
            updated=extra.get("updated", ""),
            categories=extra.get("categories", []),
            paper_id=unified.paper_id
        ))

    # Keep the legacy counters in sync with the wrapped processor.
    self.all_ref_papers = self.processor.all_ref_papers
    self.all_texts_len = self.processor.all_texts_len
    self.f_texts_len = self.processor.f_texts_len

    return legacy_papers
send_status(step, progress=None, additional_info='')

Send status update via callback.

Source code in toolboxv2/mods/TruthSeeker/arXivCrawler.py
322
323
324
325
326
327
328
329
330
def send_status(self, step: str, progress: float = None, additional_info: str = ""):
    """Emit a status-update dict through the registered callback.

    When no explicit ``progress`` is supplied, the overall progress is
    derived via ``self._update_global_progress()``.
    """
    effective_progress = self._update_global_progress() if progress is None else progress
    payload = {
        "step": step,
        "progress": effective_progress,
        "info": additional_info,
    }
    self.callback(payload)
main(query='Beste strategien in bretspielen sitler von katar') async

Main execution function

Source code in toolboxv2/mods/TruthSeeker/arXivCrawler.py
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
async def main(query: str = "Beste strategien in bretspielen sitler von katar"):
    """Main execution function.

    Bootstraps the isaa tools, runs a full research pass for *query*,
    prints the resulting insights and knowledge-base diagnostics, and
    persists the concept graph and memory store to disk.
    """
    with Spinner("Init Isaa"):
        isaa_tools = get_app("ArXivPDFProcessor", name=None).get_mod("isaa")
        isaa_tools.init_isaa(build=True)

    pdf_processor = ArXivPDFProcessor(query, tools=isaa_tools)
    papers, insights = await pdf_processor.process()

    print("Generated Insights:", insights)
    print("Generated Insights_list:", pdf_processor.last_insights_list)

    knowledge_base = isaa_tools.get_memory(pdf_processor.mem_name)
    print(await knowledge_base.query_concepts("AI"))
    print(await knowledge_base.retrieve("Evaluation metrics for assessing AI Agent performance"))
    print(knowledge_base.concept_extractor.concept_graph.concepts.keys())

    # Persist an interactive graph visualisation and the raw memory store.
    knowledge_base.vis(output_file="insights_graph.html")
    knowledge_base.save("mem.plk")
    # await get_app("ArXivPDFProcessor", name=None).a_idle()
    return insights

nGui

import colorsys import json import time from datetime import datetime, timedelta from queue import Queue from typing import Dict, Union, List, Any

import os import random from threading import Thread, Event

import networkx as nx from dataclasses import asdict

from toolboxv2 import get_app from toolboxv2.mods.FastApi.fast_nice import register_nicegui

import asyncio

from nicegui import ui

from pathlib import Path import stripe

from toolboxv2.mods.TruthSeeker.arXivCrawler import Paper from toolboxv2.mods.isaa.base.AgentUtils import anything_from_str_to_dict

Set your secret key (use environment variables in production!)

stripe.api_key = os.getenv('STRIPE_SECRET_KEY', 'sk_test_YourSecretKey')

def create_landing_page(): # Set up dynamic background ui.query("body").style("background: linear-gradient(135deg, #1a1a2e 0%, #16213e 100%)")

# Main container with enhanced responsive design
with ui.column().classes(
"w-full max-w-md p-8 rounded-3xl shadow-2xl "
"items-center self-center mx-auto my-8"
):
    # Advanced styling for glass-morphism effect
    ui.query(".nicegui-column").style("""
    background: rgba(255, 255, 255, 0.05);
    backdrop-filter: blur(12px);
    border: 1px solid rgba(255, 255, 255, 0.1);
    transition: all 0.3s ease-in-out;
    """)

    # Animated logo/brand icon
    with ui.element("div").classes("animate-fadeIn"):
        ui.icon("science").classes(
        "text-7xl mb-6 text-primary "
        "transform hover:scale-110 transition-transform"
        )

    # Enhanced typography for title
    ui.label("TruthSeeker").classes(
    "text-5xl font-black text-center "
    "text-primary mb-2 animate-slideDown"
    )

    # Stylized subtitle with brand message
    ui.label("Precision. Discovery. Insights.").classes(
    "text-xl font-medium text-center "
    "mb-10 animate-fadeIn"
    )

    # Button container for consistent spacing
    ui.button(
    "Start Research",
    on_click=lambda: ui.navigate.to("/open-Seeker.seek")
    ).classes(
    "w-full px-6 py-4 text-lg font-bold "
    "bg-primary hover:bg-primary-dark "
    "transform hover:-translate-y-0.5 "
    "transition-all duration-300 ease-in-out "
    "rounded-xl shadow-lg animate-slideUp"
    )

    # Navigation links container
    with ui.element("div").classes("mt-8 space-y-3 text-center"):
        ui.link(
        "Demo video",
        ).classes(
        "block text-lg text-gray-200 hover:text-primary "
        "transition-colors duration-300 animate-fadeIn"
        ).on("click", lambda: ui.navigate.to("/open-Seeker.demo"))

        ui.link(
        "About Us",
        ).classes(
        "block text-lg text-gray-400 hover:text-primary "
        "transition-colors duration-300 animate-fadeIn"
        ).on("click", lambda: ui.navigate.to("/open-Seeker.about"))

def create_video_demo(): with ui.card().classes('w-full max-w-3xl mx-auto').style( 'background: var(--background-color); color: var(--text-color)'): # Video container with responsive aspect ratio with ui.element('div').classes('relative w-full aspect-video'): video = ui.video('../api/TruthSeeker/video').classes('w-full h-full object-cover')

        # Custom controls overlay
        with ui.element('div').classes('absolute bottom-0 left-0 right-0 bg-black/50 p-2'):
            with ui.row().classes('items-center gap-2'):
                #play_btn = ui.button(icon='play_arrow', on_click=lambda: video.props('playing=true'))
                #pause_btn = ui.button(icon='pause', on_click=lambda: video.props('playing=false'))
                ui.slider(min=0, max=100, value=0).classes('w-full').bind_value(video, 'time')
                #mute_btn = ui.button(icon='volume_up', on_click=lambda: video.props('muted=!muted'))
                #fullscreen_btn = ui.button(icon='fullscreen', on_click=lambda: video.props('fullscreen=true'))


    # Video description
    ui.markdown('Walkthrough of TruthSeeker features and capabilities.')
    # Back to Home Button
    ui.button('Back to Home', on_click=lambda: ui.navigate.to('/open-Seeker')).classes(
        'mt-6 w-full bg-primary text-white hover:opacity-90'
    )

return video

def create_about_page(): """Create a comprehensive About page for TruthSeeker""" with ui.column().classes('w-full max-w-4xl mx-auto p-6'): # Page Header ui.label('About TruthSeeker').classes('text-4xl font-bold text-primary mb-6')

    # Mission Statement
    with ui.card().classes('w-full mb-6').style(
        'background: var(--background-color); color: var(--text-color); padding: 20px; border-radius: 8px; box-shadow: 0 2px 8px rgba(0,0,0,0.1);'
    ):
        ui.label('Our Mission').classes('text-2xl font-semibold text-primary mb-4')
        ui.markdown("""
            TruthSeeker aims to democratize access to scientific knowledge,
            transforming complex academic research into comprehensible insights.
            We bridge the gap between raw data and meaningful understanding.
        """).classes('text-lg').style('color: var(--text-color);')

    # Core Technologies
    with ui.card().classes('w-full mb-6').style(
        'background: var(--background-color); color: var(--text-color); padding: 20px; border-radius: 8px; box-shadow: 0 2px 8px rgba(0,0,0,0.1);'
    ):
        ui.label('Core Technologies').classes('text-2xl font-semibold text-primary mb-4')
        with ui.row().classes('gap-4 w-full'):
            with ui.column().classes('flex-1 text-center'):
                ui.icon('search').classes('text-4xl text-primary mb-2')
                ui.label('Advanced Query Processing').classes('font-bold')
                ui.markdown('Intelligent algorithms that extract nuanced research insights.').style(
                    'color: var(--text-color);')
            with ui.column().classes('flex-1 text-center'):
                ui.icon('analytics').classes('text-4xl text-primary mb-2')
                ui.label('Semantic Analysis').classes('font-bold')
                ui.markdown('Deep learning models for comprehensive research verification.').style(
                    'color: var(--text-color);')
            with ui.column().classes('flex-1 text-center'):
                ui.icon('verified').classes('text-4xl text-primary mb-2')
                ui.label('Research Validation').classes('font-bold')
                ui.markdown('Multi-layered verification of academic sources.').style('color: var(--text-color);')
    # Research Process
    with ui.card().classes('w-full').style('background: var(--background-color);color: var(--text-color);'):
        ui.label('Research Discovery Process').classes('text-2xl font-semibold text-primary mb-4')
        with ui.card().classes('q-pa-md q-mx-auto').style(
            'max-width: 800px; background: var(--background-color); border-radius: 8px; box-shadow: 0 2px 8px rgba(0,0,0,0.1);'
        ) as card:
            ui.markdown("# Research Workflow").style(
                "color: var(--primary-color); text-align: center; margin-bottom: 20px;")
            ui.markdown(
                """
                Welcome to TruthSeeker’s interactive research assistant. Follow the steps below to transform your initial inquiry into a refined, actionable insight.
                """
            ).style("color: var(--text-color); text-align: center; margin-bottom: 30px;")

            # The stepper component
            with ui.stepper().style('background: var(--background-color); color: var(--text-color);') as stepper:
                # Step 1: Query Initialization
                with ui.step('Query Initialization'):
                    ui.markdown("### Step 1: Query Initialization").style("color: var(--primary-color);")
                    ui.markdown(
                        """
                        Begin by entering your research question or selecting from popular academic domains.
                        This sets the direction for our semantic analysis engine.
                        """
                    ).style("color: var(--text-color); margin-bottom: 20px;")
                    with ui.stepper_navigation():
                        ui.button('Next', on_click=stepper.next).props('rounded color=primary')

                # Step 2: Semantic Search
                with ui.step('Semantic Search'):
                    ui.markdown("### Step 2: Semantic Search").style("color: var(--primary-color);")
                    ui.markdown(
                        """
                        Our advanced algorithms now process your input to generate context-rich queries.
                        This stage refines the search context by understanding the deeper intent behind your question.
                        """
                    ).style("color: var(--text-color); margin-bottom: 20px;")
                    with ui.stepper_navigation():
                        ui.button('Back', on_click=stepper.previous).props('flat')
                        ui.button('Next', on_click=stepper.next).props('rounded color=primary')

                # Step 3: Document Analysis
                with ui.step('Document Analysis'):
                    ui.markdown("### Step 3: Document Analysis").style("color: var(--primary-color);")
                    ui.markdown(
                        """
                        The system then dives into a detailed analysis of academic papers, parsing content to extract key insights and connections.
                        This ensures that even subtle but crucial information is captured.
                        """
                    ).style("color: var(--text-color); margin-bottom: 20px;")
                    with ui.stepper_navigation():
                        ui.button('Back', on_click=stepper.previous).props('flat')
                        ui.button('Next', on_click=stepper.next).props('rounded color=primary')

                # Step 4: Insight Generation
                with ui.step('Insight Generation'):
                    ui.markdown("### Step 4: Insight Generation").style("color: var(--primary-color);")
                    ui.markdown(
                        """
                        Finally, we synthesize the analyzed data into clear, actionable research summaries.
                        These insights empower you with concise guidance to drive further inquiry or practical application.
                        """
                    ).style("color: var(--text-color); margin-bottom: 20px;")
                    with ui.stepper_navigation():
                        ui.button('Back', on_click=stepper.previous).props('flat')

    # Back to Home Button
    ui.button('Back to Home', on_click=lambda: ui.navigate.to('/open-Seeker')).classes(
        'mt-6 w-full bg-primary text-white hover:opacity-90'
    )
Dummy implementation for get_tools()

def get_tools(): """ Hier solltest du dein richtiges Werkzeug-Objekt zurückliefern. In diesem Beispiel gehen wir davon aus, dass du über eine Funktion wie get_app verfügst. """ return get_app("ArXivPDFProcessor", name=None).get_mod("isaa")

def create_graph_tab(processor_instance: Dict, graph_ui: ui.element, main_ui: ui.element): """Create and update the graph visualization"""

# Get HTML graph from processor
_html_content = processor_instance["instance"].tools.get_memory(processor_instance["instance"].mem_name)
html_content = "" if isinstance(_html_content, list) else _html_content.vis(get_output_html=True)

# Ensure static directory exists
static_dir = Path('dist/static')
static_dir.mkdir(exist_ok=True)

# Save HTML to static file
graph_file = static_dir / f'graph{processor_instance["instance"].mem_name}.html'
# Save HTML to static file with added fullscreen functionality

# Add fullscreen JavaScript
graph_file.write_text(html_content, encoding='utf-8')

with main_ui:
    # Clear existing content except fullscreen button
    graph_ui.clear()

    with graph_ui:
        ui.html(f"""

            <iframe
                 src="/static/graph{processor_instance["instance"].mem_name}.html"
                style="width: 100%; height: 800px; border: none; background: #1a1a1a;"
                >
            </iframe>
        """).classes('w-full h-full')

is_init = [False]

--- Database Setup ---

def get_db(): db = get_app().get_mod("DB") if not is_init[0]: is_init[0] = True db.edit_cli("LD") db.initialize_database() return db

import pickle

--- Session State Management ---

def get_user_state(session_id: str, is_new=False) -> dict: db = get_db() state_ = { 'balance': .5, 'last_reset': datetime.utcnow().isoformat(), 'research_history': [], 'payment_id': '', } if session_id is None: state_['balance'] *= -1 if is_new: return state_, True return state_ state = db.get(f"TruthSeeker::session:{session_id}") if state.get() is None: state = state_ if is_new: return state_, True else: try: state = pickle.loads(state.get()) except Exception as e: print(e) state = { 'balance': 0.04, 'last_reset': datetime.utcnow().isoformat(), 'research_history': ["Sorry we had an error recreating your state"], 'payment_id': '', } if is_new: return state, True if is_new: return state, False return state

def save_user_state(session_id: str, state: dict): db = get_db() print("Saving state") db.set(f"TruthSeeker::session:{session_id}", pickle.dumps(state)).print()

def delete_user_state(session_id: str): db = get_db() print("Saving state") db.delete(f"TruthSeeker::session:{session_id}").print()

def reset_daily_balance(state: dict, valid=False) -> dict:
    """Top the balance back up once per 24 h window.

    When more than 24 hours have passed since ``state['last_reset']``,
    raise the balance to at least the daily floor (1.6 for validated
    sessions, 0.5 otherwise) and stamp the reset time. Mutates and
    returns *state*.
    """
    now = datetime.utcnow()
    last_reset = datetime.fromisoformat(state.get('last_reset', now.isoformat()))
    if now - last_reset > timedelta(hours=24):
        floor = 1.6 if valid else 0.5
        # Never lower an existing balance — only top it up to the floor.
        state['balance'] = max(state.get('balance', floor), floor)
        state['last_reset'] = now.isoformat()
    return state

class MemoryResultsDisplay:
    """Render a list of memory/retrieval results into a NiceGUI container.

    NOTE(review): reconstructed from a garbled (flattened) source dump —
    `__init__` name and f-string code fences were eaten by markdown
    extraction; verify against VCS.
    """

    def __init__(self, results: List[Dict[str, Any]], main_ui: ui.element):
        self.results = results
        self.main_ui = main_ui
        self.setup_ui()

    def setup_ui(self):
        """Set up the main UI for displaying memory results"""
        with self.main_ui:
            self.main_ui.clear()
            with ui.column().classes('w-full'):
                for mem_result in self.results:
                    self.create_memory_card(mem_result)

    def create_memory_card(self, mem_result: Dict[str, Any]):
        """Create a card for each memory result"""
        result = mem_result.get("result", {})
        with self.main_ui:
            if isinstance(result, dict):
                self.display_dict_result(result)
            elif hasattr(result, 'overview'):  # Assuming RetrievalResult type
                self.display_retrieval_result(result)
            else:
                ui.label("Unsupported result type").classes('--text-color:error')

    def display_dict_result(self, result: Dict[str, Any]):
        """Display dictionary-based result with collapsible sections"""
        # Summary Section — stored strings carry a trailing char, hence [:-1].
        summary = result.get("summary", {})
        if isinstance(summary, str):
            try:
                summary = json.loads(summary[:-1])
            except json.JSONDecodeError:
                summary = {"error": "Could not parse summary"}

        # Raw Results Section
        raw_results = result.get("raw_results", {})
        if isinstance(raw_results, str):
            try:
                raw_results = json.loads(raw_results[:-1])
            except json.JSONDecodeError:
                raw_results = {"error": "Could not parse raw results"}

        # Metadata Section
        metadata = result.get("metadata", {})
        with self.main_ui:
            # Collapsible Sections
            with ui.column().classes('w-full space-y-2').style("max-width: 100%;"):
                # Summary Section
                with ui.expansion('Summary', icon='description').classes('w-full') as se:
                    self.display_nested_data(summary, main_ui=se)

                # Raw Results Section
                with ui.expansion('Raw Results', icon='work').classes('w-full') as re:
                    self.display_nested_data(raw_results, main_ui=re)

                # Metadata Section
                if metadata:
                    with ui.expansion('Metadata', icon='info').classes('w-full'):
                        ui.markdown(f"```json\n{json.dumps(metadata, indent=2)}\n```").style("max-width: 100%;")

    def display_retrieval_result(self, result):
        """Display retrieval result with detailed sections"""
        with self.main_ui:
            with ui.column().classes('w-full space-y-4').style("max-width: 100%;"):
                # Overview Section
                with ui.expansion('Overview', icon='visibility').classes('w-full') as ov:
                    for overview_item in result.overview:
                        if isinstance(overview_item, str):
                            overview_item = json.loads(overview_item)
                        self.display_nested_data(overview_item, main_ui=ov)

                # Details Section
                with ui.expansion('Details', icon='article').classes('w-full'):
                    for chunk in result.details:
                        with ui.card().classes('w-full p-3 mb-2').style("background: var(--background-color)"):
                            ui.label(chunk.text).classes('font-medium mb-2 --text-color:secondary')

                            with ui.row().classes('w-full justify-between').style("background: var(--background-color)"):
                                ui.label(f"Embedding Shape: {chunk.embedding.shape}").classes('text-sm')
                                ui.label(f"Content Hash: {chunk.content_hash}").classes('text-sm')

                            if chunk.cluster_id is not None:
                                ui.label(f"Cluster ID: {chunk.cluster_id}").classes('text-sm')

                # Cross References Section
                with ui.expansion('Cross References', icon='link').classes('w-full'):
                    for topic, chunks in result.cross_references.items():
                        with ui.card().classes('w-full p-3 mb-2').style("background: var(--background-color)"):
                            ui.label(topic).classes('font-semibold mb-2 --text-color:secondary')
                            for chunk in chunks:
                                ui.label(chunk.text).classes('text-sm mb-1')

    def display_nested_data(self, data: Union[Dict, List], indent: int = 0, main_ui=None):
        """Recursively display nested dictionary or list data"""
        with (self.main_ui if main_ui is None else main_ui):
            if isinstance(data, dict):
                with ui.column().classes(f'ml-{indent * 2}').style("max-width: 100%;"):
                    for key, value in data.items():
                        with ui.row().classes('items-center'):
                            ui.label(f"{key}:").classes('font-bold mr-2 --text-color:primary')
                            if isinstance(value, list):
                                # main_chunks are intentionally not rendered.
                                if key == "main_chunks":
                                    continue
                                self.display_nested_data(value, indent + 1, main_ui=main_ui)
                            if isinstance(value, dict):
                                ui.markdown(f"```json\n{json.dumps(value, indent=2)}\n```").classes("break-words w-full").style("max-width: 100%;")
                            else:
                                ui.label(str(value)).classes('--text-color:secondary')
            elif isinstance(data, list):
                with ui.column().classes(f'ml-{indent * 2}').style("max-width: 100%;"):
                    for item in data:
                        if isinstance(item, str):
                            item = json.loads(item)
                        if isinstance(item, list):
                            self.display_nested_data(item, indent + 1, main_ui=main_ui)
                        if isinstance(item, dict):
                            ui.markdown(f"```json\n{json.dumps(item, indent=2)}\n```").classes("break-words w-full").style("max-width: 100%;")
                        else:
                            ui.label(str(item)).classes('--text-color:secondary')

def create_followup_section(processor_instance: Dict, main_ui: ui.element, session_id, balance):
    """Build the follow-up query interface inside *main_ui*.

    Lets the user run extra queries against the active processor
    instance, charging the session balance per query.

    NOTE(review): reconstructed from a garbled (flattened) source dump;
    layout intent preserved, exact original line breaks unknown.
    """
    main_ui.clear()
    with main_ui:
        ui.label("Query Interface (1ct)").classes("text-xl font-semibold mb-4")

        # Container for query inputs
        query_container = ui.column().classes("w-full gap-4")

        # Query parameters section
        with ui.expansion("Query Parameters", icon="settings").classes("w-full"):
            with ui.grid(columns=2).classes("w-full gap-4"):
                k_input = ui.number("Results Count (k)", value=2, min=1, max=20)
                min_sim = ui.number("Min Similarity", value=.3, min=0, max=1, step=0.1)
                cross_depth = ui.number("Cross Reference Depth", value=2, min=1, max=5)
                max_cross = ui.number("Max Cross References", value=10, min=1, max=20)
                max_sent = ui.number("Max Sentences", value=10, min=1, max=50)
                unified = ui.switch("Unified Retrieve (+3ct)", value=True)

        # Results display
        with ui.element("div").classes("w-full mt-4") as results_display:
            pass

        # Add initial query input
        with query_container:
            query_input = ui.input("Query", placeholder="Enter your query...") \
                .classes("w-full")

        # Control buttons
        with ui.row().classes("w-full gap-4 mt-4"):
            ui.button("Execute Query", on_click=lambda: asyncio.create_task(execute_query())) \
                .classes("bg-green-600 hover:bg-green-700")
            ui.button("Clear Results", on_click=lambda: results_display.clear()) \
                .classes("bg-red-600 hover:bg-red-700")

    async def execute_query():
        """Execute a single query with parameters"""
        nonlocal query_input, results_display, main_ui
        try:
            query_text = query_input.value
            if not query_text.strip():
                with main_ui:
                    ui.notify("No Input", type="warning")
                return ""

            if not processor_instance.get("instance"):
                with main_ui:
                    ui.notify("No active processor instance", type="warning")
                return
            # Collect parameters
            params = {
                "k": int(k_input.value),
                "min_similarity": min_sim.value,
                "cross_ref_depth": int(cross_depth.value),
                "max_cross_refs": int(max_cross.value),
                "max_sentences": int(max_sent.value),
                "unified": unified.value
            }
            # Construct query parameters
            query_params = {
                "k": params["k"],
                "min_similarity": params["min_similarity"],
                "cross_ref_depth": params["cross_ref_depth"],
                "max_cross_refs": params["max_cross_refs"],
                "max_sentences": params["max_sentences"]
            }

            # Execute query
            results = await processor_instance["instance"].extra_query(
                query=query_text,
                query_params=query_params,
                unified_retrieve=params["unified"]
            )
            print("results", results)
            # Charge the session: unified retrieval costs more.
            s = get_user_state(session_id)
            s['balance'] -= .04 if unified.value else .01
            save_user_state(session_id, s)
            with main_ui:
                balance.set_text(f"Balance: {s['balance']:.2f}€")
            # Format results
            with main_ui:
                with results_display:
                    MemoryResultsDisplay(results, results_display)

        except Exception as e:
            return f"Error executing query: {str(e)}\n\n"

online_states = [0] def create_research_interface(Processor):

def helpr(request, session: dict):
    """Build the research page for one request.

    Shows a loading spinner, restores the user's session state in a
    background thread, then reveals the main interface once ready.
    """

    state = {'balance':0, 'research_history': []}
    main_ui = None
    with ui.column().classes("w-full max-w-6xl mx-auto p-6 space-y-6") as loading:
        ui.spinner(size='lg')
        ui.label('Initializing...').classes('ml-2')

    # Container for main content (initially hidden)
    content = ui.column().classes('hidden')

    # Extract session data before spawning thread
    session_id = session.get('ID')
    session_id_h = session.get('IDh')
    # NOTE(review): `request.row` looks like a typo for `request.raw` — verify upstream.
    session_rid = request.row.query_params.get('session_id') if hasattr(request, 'row') else request.query_params.get('session_id')
    session_valid = session.get('valid')

    # Thread communication
    result_queue = Queue()
    ready_event = Event()

    def init_background():
        """Restore/merge persisted session state off the UI thread.

        Puts either the resulting state dict or the raised Exception on
        `result_queue`, then sets `ready_event` so check_ready() can react.
        """
        nonlocal session_id, session_id_h, session_rid, session_valid
        try:
            # Original initialization logic
            _state, is_new = get_user_state(session_id, is_new=True)

            if is_new and session_id_h != "#0":
                # First visit under this ID: migrate state from the hashed ID.
                _state = get_user_state(session_id_h)
                save_user_state(session_id, _state)
                delete_user_state(session_id_h)
            if session_rid:
                # Session restore via ?session_id=...: copy donor state over.
                state_: dict
                state_, is_new_ = get_user_state(session_rid, is_new=True)
                if not is_new_:
                    _state = state_.copy()
                    # NOTE(review): the stripped donor state_ is never saved
                    # back under session_rid — confirm this is intentional.
                    state_['payment_id'] = ''
                    state_['last_reset'] = datetime.utcnow().isoformat()
                    state_['research_history'] = state_['research_history'][:3]
                    state_['balance'] = 0
                    save_user_state(session_id, _state)
            _state = reset_daily_balance(_state, session_valid)
            save_user_state(session_id, _state)

            # Send result back to main thread
            result_queue.put(_state)
            ready_event.set()
        except Exception as e:
            result_queue.put(e)
            ready_event.set()

        # Start background initialization

    Thread(target=init_background).start()

    def check_ready():
        """Poll every 100 ms until background init finishes, then swap the
        loading spinner for the main UI (or show the init error)."""
        nonlocal state
        if ready_event.is_set():
            result = result_queue.get()

            # Check if initialization failed
            if isinstance(result, Exception):
                loading.clear()
                with loading:
                    ui.label(f"Error during initialization: {str(result)}").classes('text-red-500')
                return

            # Get state and build main UI
            state = result
            loading.classes('hidden')
            content.classes(remove='hidden')
            main_ui.visible = True
            with main_ui:
                balance.set_text(f"Balance: {state['balance']:.2f}€")
                show_history()
            return  # Stop the timer

        # Check again in 100ms
        ui.timer(0.1, check_ready, once=True)

    # Start checking for completion
    check_ready()

    # Keep the active processor instance so follow-up questions can be asked.
    processor_instance = {"instance": None}

    # UI element placeholders; they are assigned during UI construction below
    # and made available to the callback functions via "nonlocal".
    overall_progress = None
    status_label = None
    results_card = None
    summary_content = None
    analysis_content = None
    references_content = None
    followup_card = None
    research_card = None
    config_cart = None
    progress_card = None
    balance = None
    graph_ui = None

    sr_button = None
    r_button = None
    r_text = None


    # Global config storage with default values
    config = {
        'chunk_size': 21000,
        'overlap': 600,
        'num_search_result_per_query': 3,
        'max_search': 3,
        'num_workers': None
    }

    def update_estimates():
        """
        Dummy estimation based on query length and configuration.
        (Replace with your own non-linear formula if needed.)

        Updates the paper-count/time/price labels and returns the
        estimated price in euros.
        """
        query_text = query.value or ""
        query_length = len(query_text)
        # For example: estimated time scales with chunk size and query length.
        estimated_time ,estimated_price = Processor.estimate_processing_metrics(query_length, **config)
        # Scale the time estimate by how many queries are already running.
        estimated_time *= max(1, online_states[0] * 6)
        if processor_instance["instance"] is not None:
            # Follow-up runs on an existing processor add a flat surcharge.
            estimated_price += .25
        if estimated_time < 60:
            time_str = f"~{int(estimated_time)}s"
        elif estimated_time < 3600:
            minutes = estimated_time // 60
            seconds = estimated_time % 60
            time_str = f"~{int(minutes)}m {int(seconds)}s"
        else:
            hours = estimated_time // 3600
            minutes = (estimated_time % 3600) // 60
            time_str = f"~{int(hours)}h {int(minutes)}m"
        with main_ui:
            query_length_label.set_text(f"Total Papers: {config['max_search']*config['num_search_result_per_query']}")
            time_label.set_text(f"Processing Time: {time_str}")
            price_label.set_text(f"Price: {estimated_price:.2f}€")

        return estimated_price

    def on_config_change(event):
        """
        Update the global config based on input changes and recalc estimates.

        Each field is parsed independently; invalid values are ignored
        (the previous config value is kept).
        """
        try:
            config['chunk_size'] = int(chunk_size_input.value)
        except ValueError:
            pass
        try:
            config['overlap'] = int(overlap_input.value)
            # Cap overlap at a quarter of the chunk size and reflect the
            # clamped value back into the input widget.
            if config['overlap'] > config['chunk_size'] / 4:
                config['overlap'] = int(config['chunk_size'] / 4)
                with main_ui:
                    overlap_input.value = config['overlap']
        except ValueError:
            pass
        try:
            config['num_search_result_per_query'] = int(num_search_result_input.value)
        except ValueError:
            pass
        try:
            config['max_search'] = int(max_search_input.value)
        except ValueError:
            pass
        try:
            # 0 means "use the processor's default worker count".
            config['num_workers'] = int(num_workers_input.value) if num_workers_input.value != 0 else None
        except ValueError:
            config['num_workers'] = None

        update_estimates()

    def on_query_change():
        # Recalculate time/price estimates whenever the query text changes.
        update_estimates()

    # Callback invoked by the processor (via processor_instance.callback).
    def update_status(data: dict):
        """Render a progress dict (or a plain message) into the progress card."""
        nonlocal overall_progress, status_label
        if not data:
            return
        # Update the progress bar and the current step (if present).
        with main_ui:
            if isinstance(data, dict):
                progress = data.get("progress", 0)
                step = data.get("step", "Processing...")
                overall_progress.value =round( progress ,2) # nicegui.linear_progress expects a value between 0 and 1
                status_label.set_text(f"{step} {data.get('info','')}")
            else:
                status_label.set_text(f"{data}")

    def start_search():
        nonlocal balance

        async def helper():
            nonlocal processor_instance, overall_progress, status_label, results_card,                     summary_content, analysis_content,config, references_content, followup_card,sr_button,r_button,r_text

            try:
                if not validate_inputs():
                    with main_ui:
                        state['balance'] += est_price
                        save_user_state(session_id, state)
                        balance.set_text(f"Balance: {state['balance']:.2f}€")
                    return
                reset_interface()
                show_progress_indicators()

                query_text = query.value.strip()
                # Erzeuge das "tools"-Objekt (abhängig von deiner konkreten Implementation)
                tools = get_tools()
                with main_ui:
                    research_card.visible = False
                    config_cart.visible = False
                    config_section.visible = False
                    query.set_value("")
                # Direkt instanziieren: Eine neue ArXivPDFProcessor-Instanz
                if processor_instance["instance"] is not None:
                    processor = processor_instance["instance"]
                    processor.chunk_size = config['chunk_size']
                    processor.overlap = config['overlap']
                    processor.num_search_result_per_query = config['num_search_result_per_query']
                    processor.max_search = config['max_search']
                    processor.num_workers = config['num_workers']
                    papers, insights = await processor.process(query_text)
                else:
                    processor = Processor(query_text, tools=tools, **config)
                # Setze den Callback so, dass Updates in der GUI angezeigt werden
                    processor.callback = update_status
                    processor_instance["instance"] = processor
                    papers, insights = await processor.process()

                update_results({
                    "papers": papers,
                    "insights": insights
                })
                with main_ui:
                    research_card.visible = True
                    config_cart.visible = True
                    show_history()

            except Exception as e:
                import traceback

                with main_ui:
                    update_status({"progress": 0, "step": "Error", "info": str(e)})
                    state['balance'] += est_price
                    save_user_state(session_id, state)
                    balance.set_text(f"Balance: {state['balance']:.2f}€")
                    ui.notify(f"Error {str(e)})", type="negative")
                    research_card.visible = True
                    config_cart.visible = True
                    config_section.visible = True
                print(traceback.format_exc())

        def target():
            get_app().run_a_from_sync(helper, )

        est_price = update_estimates()
        if est_price > state['balance']:
            with main_ui:
                ui.notify(f"Insufficient balance. Need €{est_price:.2f}", type='negative')
        else:
            state['balance'] -= est_price
            save_user_state(session_id, state)
            with main_ui:
                online_states[0] += 1
                balance.set_text(f"Balance: {state['balance']:.2f}€ Running Queries: {online_states[0]}")

            Thread(target=target, daemon=True).start()
            with main_ui:
                online_states[0] -= 1
                balance.set_text(f"Balance: {get_user_state(session_id)['balance']:.2f}€")


    def show_history():
        """List past research runs inside the history card, each with an
        Open button that restores that run via load_history()."""
        with config_cart:
            for idx, entry in enumerate(state['research_history']):
                with ui.card().classes("w-full backdrop-blur-lg bg-white/10 p-4"):
                    ui.label(entry['query']).classes('text-sm')
                    # Bind idx as a lambda default so each button opens its own entry.
                    ui.button("Open").on_click(lambda _, i=idx: load_history(i))

    def reset():
        """Discard the active processor, restore the default labels, and
        kick off a fresh research run."""
        nonlocal processor_instance, results_card, followup_card, sr_button, r_button, r_text
        processor_instance["instance"] = None
        show_progress_indicators()
        with main_ui:
            config_cart.visible = False
            config_section.visible = False
            followup_card.visible = False
            results_card.visible = False
            r_button.visible = False
            r_text.set_text("Research Interface")
            sr_button.set_text("Start Research")
        start_search()
    # UI construction

    with ui.column().classes("w-full max-w-6xl mx-auto p-6 space-y-6") as main_ui:
        balance = ui.label(f"Balance: {state['balance']:.2f}€").classes("text-s font-semibold")

        # --- Research input card ---
        with ui.card().classes("w-full backdrop-blur-lg bg-white/10 p-4") as research_card:
            r_text = ui.label("Research Interface").classes("text-3xl font-bold mb-4")

            # Query input section with auto-updating estimates
            query = ui.input("Research Query",
                             placeholder="Gib hier deine Forschungsfrage ein...",
                             value="") \
                .classes("w-full min-h-[100px]") \
                .on('change', lambda e: on_query_change()).style("color: var(--text-color)")

            # --- Action buttons ---
            with ui.row().classes("mt-4"):
                sr_button = ui.button("Start Research", on_click=start_search) \
                    .classes("bg-blue-600 hover:bg-blue-700 py-3 rounded-lg")
                ui.button("toggle config",
                          on_click=lambda: setattr(config_section, 'visible', not config_section.visible) or show_progress_indicators()).style(
                    "color: var(--text-color)")
                r_button = ui.button("Start new Research",
                                     on_click=reset).style(
                    "color: var(--text-color)")
        r_button.visible = False

        # --- Options / configuration (hidden until toggled) ---
        with ui.card_section().classes("w-full backdrop-blur-lg bg-white/10 hidden") as config_section:
            ui.separator()
            ui.label("Configuration Options").classes("text-xl font-semibold mt-4 mb-2")
            with ui.row():
                chunk_size_input = ui.number(label="Chunk Size",
                                             value=config['chunk_size'], format='%.0f', max=64_000, min=1000,
                                             step=100) \
                    .on('change', on_config_change).style("color: var(--text-color)")
                overlap_input = ui.number(label="Overlap",
                                          value=config['overlap'], format='%.0f', max=6400, min=100, step=50) \
                    .on('change', on_config_change).style("color: var(--text-color)")

            with ui.row():
                num_search_result_input = ui.number(label="Results per Query",
                                                    value=config['num_search_result_per_query'], format='%.0f',
                                                    min=1, max=100, step=1) \
                    .on('change', on_config_change).style("color: var(--text-color)")
                max_search_input = ui.number(label="Max Search Queries",
                                             value=config['max_search'], format='%.0f', min=1, max=100, step=1) \
                    .on('change', on_config_change).style("color: var(--text-color)")
                num_workers_input = ui.number(label="Number of Workers (leave empty for default)",
                                              value=0, format='%.0f', min=0, max=32, step=1) \
                    .on('change', on_config_change).style("color: var(--text-color)")
        config_section.visible = False

        # --- Results display ---
        with ui.card().classes("w-full backdrop-blur-lg p-4 bg-white/10") as results_card:
            ui.label("Research Results").classes("text-xl font-semibold mb-4")
            with ui.tabs() as tabs:
                ui.tab("Summary")
                ui.tab("References")
                ui.tab("SystemStates")
            with ui.tab_panels(tabs, value="Summary").classes("w-full").style("background-color: var(--background-color)"):
                with ui.tab_panel("Summary"):
                    summary_content = ui.markdown("").style("color : var(--text-color)")
                with ui.tab_panel("References"):
                    references_content = ui.markdown("").style("color : var(--text-color)")
                with ui.tab_panel("SystemStates"):
                    analysis_content = ui.markdown("").style("color : var(--text-color)")

        # Results become visible once data is available.
        results_card.visible = False

        # --- Follow-up area (filled later by create_followup_section) ---
        with ui.card().classes("w-full backdrop-blur-lg bg-white/10 p-4 hidden") as followup_card:
            pass
        followup_card.visible = False

        # --- Progress display ---
        with ui.card().classes("w-full backdrop-blur-lg bg-white/10 p-4") as progress_card:
            with ui.row():
                ui.label("Research Progress").classes("text-xl font-semibold mb-4")
                query_length_label = ui.label("").classes("mt-6 hover:text-primary transition-colors duration-300")
                time_label = ui.label("Time: ...").classes("mt-6 hover:text-primary transition-colors duration-300")
                price_label = ui.label("Price: ...").classes(
                    "mt-6 hover:text-primary transition-colors duration-300")

            overall_progress = ui.linear_progress(0).classes("w-full mb-4")
            status_label = ui.label("Warte auf Start...").classes("text-base")

        # --- Research history ---
        with ui.card().classes("w-full backdrop-blur-lg bg-white/10 p-4") as config_cart:
            ui.label('Research History').classes('text-xl p-4')
            show_history()

        ui.button('Add Credits', on_click=lambda: balance_overlay(session_id)).props('icon=paid')
        ui.label('About TruthSeeker').classes(
            'mt-6 text-gray-500 hover:text-primary '
            'transition-colors duration-300'
        ).on('click', lambda: ui.navigate.to('/open-Seeker.about', new_tab=True))

        # BUGFIX: "white:100%" is not a CSS property; the intent was "width".
        with ui.element('div').classes("w-full").style("width:100%; height:100%") as graph_ui:
            pass

        with ui.card().classes("w-full p-4").style("background-color: var(--background-color)"):
            ui.label("Private Session link (restore the session on a different device)")
            # BUGFIX: os.getenv("HOSTNAME") can return None, which made the
            # `in` test raise TypeError; default to localhost instead.
            hostname = os.getenv("HOSTNAME", "localhost") or "localhost"
            base_url = (f'https://{hostname}/gui/open-Seeker.seek'
                        if 'localhost' not in hostname
                        else 'http://localhost:5000/gui/open-Seeker.seek')
            ui.label(f"{base_url}?session_id={session_id}").style("width:100%")
            ui.label("Changes each time!")

        graph_ui.visible = False
    main_ui.visible = False

    # --- Hilfsfunktionen ---
    def validate_inputs() -> bool:
        """Return True when a research query has been entered; otherwise
        notify the user (in German, matching the page locale) and return False."""
        if query.value.strip():
            return True
        with main_ui:
            ui.notify("Bitte gib eine Forschungsfrage ein.", type="warning")
        return False

    def reset_interface():
        """Zero the progress bar and hide result/follow-up/graph areas
        in preparation for a new run."""
        nonlocal overall_progress, status_label, results_card, followup_card
        overall_progress.value = 0
        with main_ui:
            status_label.set_text("Research startet...")
        # Hide the results and follow-up areas
        results_card.visible = False
        followup_card.visible = False
        graph_ui.visible = False

    def show_progress_indicators():
        """Make the progress card visible."""
        nonlocal progress_card
        progress_card.visible = True

    def update_results(data: dict, save=True):
        nonlocal summary_content, analysis_content, references_content, results_card,                followup_card,graph_ui, r_button, r_text, sr_button
        with main_ui:
            r_button.visible = True
            r_text.set_text("Add to current Results or press 'Start new Research'")
            sr_button.set_text("Add to current Results")
        # Handle papers (1-to-1 case)
        papers = data.get("papers", [])
        if not isinstance(papers, list):
            papers = [papers]

        # Get insights
        insights = data.get("insights", [])

        if save:
            history_entry = data.copy()
            history_entry['papers'] = [paper.model_dump_json() for paper in papers]
            if processor_instance is not None and processor_instance['instance'] is not None:
                history_entry["mam_name"] = processor_instance['instance'].mem_name
                history_entry["query"] = processor_instance['instance'].query

                history_entry["processor_memory"] = processor_instance['instance'].tools.get_memory(

                ).save_memory(history_entry["mam_name"], None)
            state['research_history'].append(history_entry)
            save_user_state(session_id, state)
        else:
            papers = [Paper(**json.loads(paper)) for paper in papers]
        create_followup_section(processor_instance, followup_card, session_id, balance)
        with main_ui:
            progress_card.visible = False
            # Build summary from insights
            summaries = []
            for insight in insights:
                if 'result' in insight and 'summary' in insight['result']:
                    if isinstance(insight['result']['summary'], str):
                        # print(insight['result']['summary'], "NEXT", json.loads(insight['result']['summary'][:-1]),"NEXT22",  type(json.loads(insight['result']['summary'][:-1])))
                        insight['result']['summary'] = json.loads(insight['result']['summary'][:-1])
                    main_summary = insight['result']['summary'].get('main_summary', '')
                    if main_summary:
                        summaries.append(main_summary)
            summary_text = "

".join(summaries) if summaries else "No summary available." summary_content.set_content(f"# Research Summary

{summary_text}")

            # Analysis section (unchanged if processor details haven't changed)
            if processor_instance["instance"] is not None:
                inst = processor_instance["instance"]
                analysis_md = (
                    f"# Analysis

" f"- query: {inst.query} " f"- chunk_size: {inst.chunk_size} " f"- overlap: {inst.overlap} " f"- max_workers: {inst.max_workers} " f"- num_search_result_per_query: {inst.nsrpq} " f"- max_search: {inst.max_search} " f"- download_dir: {inst.download_dir} " f"- mem_name: {inst.mem_name} " f"- current_session: {inst.current_session} " f"- all_ref_papers: {inst.all_ref_papers} " f"- all_texts_len: {inst.all_texts_len} " f"- final_texts_len: {inst.f_texts_len} " f"- num_workers: {inst.num_workers}" ) analysis_content.set_content(analysis_md)

            # References and Insights section
            references_md = "# References

" # Add papers references_md += " ".join( f"- ({i}) {getattr(paper, 'title', 'Unknown Title')}})" for i, paper in enumerate(papers) )

            # Add detailed insights
            references_md += "
Insights

" for i, insight in enumerate(insights): print(insight) result = insight.get('result', {}) summary = result.get('summary', {})

                if isinstance(summary, str):
                    summary = json.loads(summary)

                # Main summary
                references_md += f"
Insight

" references_md += f"### Main Summary {summary.get('main_summary', 'No summary available.')} "

                # Concept Analysis
                concept_analysis = summary.get('concept_analysis', {})
                if concept_analysis:
                    references_md += "
Concept Analysis

" references_md += "#### Key Concepts - " + " - ".join( concept_analysis.get('key_concepts', [])) + " " references_md += "

Relationships
  • " + "
  • ".join( concept_analysis.get('relationships', [])) + " " references_md += "
Importance Hierarchy
  • " + "
  • ".join( concept_analysis.get('importance_hierarchy', [])) + " "

                # Topic Insights
                topic_insights = summary.get('topic_insights', {})
                if topic_insights:
                    references_md += "
    
    Topic Insights

    " references_md += "#### Primary Topics - " + " - ".join( topic_insights.get('primary_topics', [])) + " " references_md += "

    Cross References
    • " + "
    • ".join( topic_insights.get('cross_references', [])) + " " references_md += "
    Knowledge Gaps
    • " + "
    • ".join( topic_insights.get('knowledge_gaps', [])) + " "

              # Relevance Assessment
              relevance = summary.get('relevance_assessment', {})
              if relevance:
                  references_md += "
      
      Relevance Assessment

      " references_md += f"- Query Alignment: {relevance.get('query_alignment', 'N/A')} " references_md += f"- Confidence Score: {relevance.get('confidence_score', 'N/A')} " references_md += f"- Coverage Analysis: {relevance.get('coverage_analysis', 'N/A')} "

          references_content.set_content(references_md)
      
          # nx concpts graph
          if processor_instance["instance"] is not None:
              create_graph_tab(
                  processor_instance,
                  graph_ui,main_ui
              )
      
          # Show results and followup cards
          results_card.visible = True
          followup_card.visible = True
          graph_ui.visible = True
      

      def load_history(index: int):
          """Restore a past research run from the session history.

          Reattaches (or rebuilds) the processor, reloads its memory blob,
          and re-renders the stored results without re-charging the user.
          """
          entry = state['research_history'][index]
          if processor_instance is not None and processor_instance['instance'] is not None:
              # Reuse the live processor; just point it at the stored memory.
              processor_instance["instance"].mem_name = entry["mam_name"]
              processor_instance['instance'].query = entry["query"]
          else:
              # No live processor: build one configured like the stored run.
              processor = Processor(entry["query"], tools=get_tools(), **config)
              # Route progress updates into the GUI.
              processor.callback = update_status
              processor.mem_name = entry["mam_name"]
              processor_instance["instance"] = processor

          processor_instance["instance"].tools.get_memory().load_memory(entry["mam_name"], entry["processor_memory"])
          processor_instance["instance"].mem_name = entry["mam_name"]
          update_results(entry, save=False)

    return helpr

# --- Stripe Integration ---

def regiser_stripe_integration(is_scc=True):
    """Build the Stripe checkout-return page handler.

    Returns a NiceGUI page callback that resolves the caller's session id,
    looks up the pending checkout session and — when ``is_scc`` is set and
    Stripe reports the session as paid — credits the paid amount to the
    user's balance and shows a private restore link.
    """
    # The same Tailwind class string is used for every "Start Research" button.
    BTN_CLASSES = (
        "w-full px-6 py-4 text-lg font-bold "
        "bg-primary hover:bg-primary-dark "
        "transform hover:-translate-y-0.5 "
        "transition-all duration-300 ease-in-out "
        "rounded-xl shadow-lg animate-slideUp"
    )

    def stripe_callback(request: Request):
        # NOTE(review): `request.row` looks like a typo for `request.raw` — confirm.
        if hasattr(request, 'row'):
            sid = request.row.query_params.get('session_id')
        else:
            sid = request.query_params.get('session_id')
        state = get_user_state(sid)

        if state['payment_id'] == '':
            # No checkout session was ever started for this user.
            with ui.card().classes("w-full items-center").style("background-color: var(--background-color)"):
                ui.label(f"No payment id!").classes("text-lg font-bold")
                ui.button(
                    "Start Research",
                    on_click=lambda: ui.navigate.to("/open-Seeker.seek?session_id=" + sid)
                ).classes(BTN_CLASSES)
            return

        try:
            session_data = stripe.checkout.Session.retrieve(state['payment_id'])
        except Exception as e:
            # Stripe lookup failed — show the error and a way back.
            with ui.card().classes("w-full items-center").style("background-color: var(--background-color)"):
                ui.label(f"No Transactions Details !{e}").classes("text-lg font-bold")
                ui.button(
                    "Start Research",
                    on_click=lambda: ui.navigate.to("/open-Seeker.seek")
                ).classes(BTN_CLASSES)
                return

        with ui.card().classes("w-full items-center").style("background-color: var(--background-color)"):
            if is_scc and state['payment_id'] != '' and session_data.payment_status == 'paid':
                # Re-read state, credit the balance and clear the pending payment.
                state = get_user_state(sid)
                amount = session_data.amount_total / 100  # Stripe reports cents
                state['balance'] += amount
                state['payment_id'] = ''
                save_user_state(sid, state)

                ui.label(f"Transaction Complete - New balance :{state['balance']}").classes("text-lg font-bold")
                with ui.card().classes("w-full p-4").style("background-color: var(--background-color)"):
                    ui.label("Private Session link (restore the session on a different device)")
                    base_url = f'https://{os.getenv("HOSTNAME")}/gui/open-Seeker.seek' if not 'localhost' in os.getenv("HOSTNAME") else 'http://localhost:5000/gui/open-Seeker.seek'
                    ui.label(f"{base_url}?session_id={sid}").style("white:100%")
                    ui.label("Changes each time!")
            else:
                ui.label(f"Transaction Error! {session_data}, {dir(session_data)}").classes("text-lg font-bold")
            ui.button(
                "Start Research",
                on_click=lambda: ui.navigate.to("/open-Seeker.seek")
            ).classes(BTN_CLASSES)

    return stripe_callback

def handle_stripe_payment(amount: float, session_id):
    """Open a Stripe Checkout page (in a new tab) for `amount` euros of credits.

    The created checkout session id is stored on the user state so the
    return page (`open-Seeker.stripe`) can verify the payment afterwards.
    """
    if not 'localhost' in os.getenv("HOSTNAME"):
        base_url = f'https://{os.getenv("HOSTNAME")}/gui/open-Seeker.stripe'
    else:
        base_url = 'http://localhost:5000/gui/open-Seeker.stripe'

    session = stripe.checkout.Session.create(
        payment_method_types=['card', "link", ],
        line_items=[{
            'price_data': {
                'currency': 'eur',
                'product_data': {'name': 'Research Credits'},
                'unit_amount': int(amount * 100),  # euros -> cents
            },
            'quantity': 1,
        }],
        automatic_tax={"enabled": True},
        mode='payment',
        success_url=f'{base_url}?session_id={session_id}',
        cancel_url=f'{base_url}.error'
    )

    # Remember the pending payment on the user state for later verification.
    state = get_user_state(session_id)
    state['payment_id'] = session.id
    save_user_state(session_id, state)

    ui.navigate.to(session.url, new_tab=True)

--- UI Components ---

def balance_overlay(session_id):
    """Build the "Add Research Credits" purchase dialog and return it.

    The Purchase button hands the entered amount to `handle_stripe_payment`.
    """
    with ui.dialog().classes('w-full max-w-md bg-white/20 backdrop-blur-lg rounded-xl') as dialog:
        with ui.card().classes('w-full p-6 space-y-4').style("background-color: var(--background-color)"):
            ui.label('Add Research Credits').classes('text-2xl font-bold')
            amount = ui.number('Amount (€) min 2', value=5, format='%.2f', min=2, max=9999, step=1).classes('w-full')
            with ui.row().classes('w-full justify-between'):
                ui.button('Cancel', on_click=dialog.close).props('flat')
                ui.button('Purchase', on_click=lambda: handle_stripe_payment(amount.value, session_id))
    return dialog

# NOTE(review): this line was collapsed by documentation extraction — the original
# spans multiple lines and the `additional=""" """` payloads appear truncated.
# Restore the real body from toolboxv2/mods/TruthSeeker/newui.py before editing.
def create_ui(processor): # ui_instance = register_nicegui("open-Seeker", create_landing_page , additional=""" """, show=False) register_nicegui("open-Seeker.demo", create_video_demo, additional=""" """, show=False)

newui

cleanup_module(app)

Cleanup resources when the module is unloaded

Source code in toolboxv2/mods/TruthSeeker/newui.py
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
@export(mod_name=MOD_NAME, version=version, exit_f=True)
def cleanup_module(app: App):
    """Cleanup resources when the module is unloaded"""
    import glob
    import shutil

    # Best-effort removal of the per-research PDF download directories.
    for pdf_dir in glob.glob("pdfs_*"):
        try:
            shutil.rmtree(pdf_dir)
        except Exception as e:
            print(f"Error removing directory {pdf_dir}: {str(e)}")

    # Drop any SSE / payment message queues still hanging off the app.
    for queue_attr in ('sse_queues', 'payment_queues'):
        if hasattr(app, queue_attr):
            setattr(app, queue_attr, {})

    return Result.ok(info="ArXivPDFProcessor UI cleaned up")
create_payment(app, data) async

Create a Stripe payment session

Source code in toolboxv2/mods/TruthSeeker/newui.py
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
@export(mod_name=MOD_NAME, api=True, version=version)
async def create_payment(app: App, data):
    """Create a Stripe payment session.

    Expects ``data`` with:
        amount (number): donation amount in euros, minimum 2.
        session_id: the caller's session identifier.

    Returns ``Result.ok({"url": ...})`` with the Stripe Checkout URL, a
    user error for an invalid amount, or an internal error on Stripe failure.
    """
    amount = data.get("amount")
    session_id = data.get("session_id")

    # Validate up front: a missing or non-numeric amount would otherwise raise
    # TypeError on the comparison and escape as an unhandled exception (the
    # try block below does not cover this line).
    if not isinstance(amount, (int, float)) or amount < 2:
        return Result.default_user_error(info="Minimum donation amount is €2")

    try:
        # Create a Stripe Checkout Session
        base_url = f"https://{os.getenv('HOSTNAME', 'localhost:5000')}"
        success_url = f"{base_url}/api/{MOD_NAME}/payment_success?session_id={session_id}"
        cancel_url = f"{base_url}/api/{MOD_NAME}/payment_cancel?session_id={session_id}"
        stripe = __import__('stripe')
        # NOTE(review): falls back to a placeholder key — deployments must set
        # STRIPE_SECRET_KEY.
        stripe.api_key = os.getenv('STRIPE_SECRET_KEY', 'sk_test_YourSecretKey')

        stripe_session = stripe.checkout.Session.create(
            payment_method_types=['card', 'link'],
            line_items=[{
                'price_data': {
                    'currency': 'eur',
                    'product_data': {'name': 'Research Credits'},
                    'unit_amount': int(amount * 100),  # euros -> cents
                },
                'quantity': 1,
            }],
            automatic_tax={"enabled": True},
            mode='payment',
            success_url=success_url,
            cancel_url=cancel_url
        )

        # Store the payment info
        if not hasattr(app, 'payment_info'):
            app.payment_info = {}

        # Initialize payment_queues if not already done
        if not hasattr(app, 'payment_queues'):
            app.payment_queues = {}

        # Create a queue for this payment so SSE clients can be notified.
        app.payment_queues[session_id] = asyncio.Queue()

        app.payment_info[session_id] = {
            'payment_id': stripe_session.id,
            'amount': amount,
            'status': 'pending'
        }

        return Result.ok(data={"url": stripe_session.url})
    except Exception as e:
        return Result.default_internal_error(info=f"Error creating payment: {str(e)}")
estimate_processing(data) async

Estimate processing time and cost for a given query

Source code in toolboxv2/mods/TruthSeeker/newui.py
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
@export(mod_name=MOD_NAME, api=True, version=version)
async def estimate_processing(data):
    """Estimate processing time and cost for a given query.

    Reads `query`, `max_search` and `num_search_result_per_query` from the
    request payload and delegates to the processor's static estimator.
    """
    query = data.get("query", "")
    max_search = data.get("max_search", 4)
    per_query = data.get("num_search_result_per_query", 6)

    estimated_time, estimated_price = ArXivPDFProcessor.estimate_processing_metrics(
        query_length=len(query),
        max_search=max_search,
        num_search_result_per_query=per_query,
        chunk_size=1_000_000,
        overlap=2_000,
        num_workers=None,
    )

    return Result.ok(data={
        "time": estimated_time,
        "price": estimated_price,
    })
follow_up_query(app, data) async

Ask a follow-up question about the research

Source code in toolboxv2/mods/TruthSeeker/newui.py
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
@export(mod_name=MOD_NAME, api=True, version=version)
async def follow_up_query(app: App, data):
    """Ask a follow-up question about the research.

    Requires a completed research process with a live processor; the
    follow-up is answered via the processor's `extra_query` method.
    """
    research_id = data.get("research_id")
    query = data.get("query")

    processes = getattr(app, 'research_processes', None)
    if not processes or research_id not in processes:
        return Result.default_user_error(info="Research process not found")

    entry = processes[research_id]
    if entry['status'] != 'complete':
        return Result.default_user_error(info="Research is not complete")

    processor = entry['processor']
    if not processor:
        return Result.default_user_error(info="Processor not available")

    try:
        result = await processor.extra_query(query)
        answer = result['response'] if result and 'response' in result else "No response"
        return Result.ok(data={"answer": answer})
    except Exception as e:
        return Result.default_internal_error(info=f"Error processing follow-up query: {str(e)}")
initialize_module(app)

Initialize the module and register UI with CloudM

Source code in toolboxv2/mods/TruthSeeker/newui.py
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
@export(mod_name=MOD_NAME, version=version, initial=True)
def initialize_module(app: App):
    """Initialize the module and register UI with CloudM"""
    # Register the TruthSeeker page with the CloudM UI registry.
    app.run_any(
        ("CloudM", "add_ui"),
        name="TruthSeeker",
        title="TruthSeeker Research",
        path=f"/api/{MOD_NAME}/get_main_ui",
        description="AI Research Assistant",
    )

    # Make sure the SSE message-queue registry exists on the app.
    if not hasattr(app, 'sse_queues'):
        app.sse_queues = {}

    print("TruthSeeker online")
    return Result.ok(info="ArXivPDFProcessor UI initialized")
payment_cancel(app, session_id, request_as_kwarg=True, request=None) async

Handle cancelled payment

Source code in toolboxv2/mods/TruthSeeker/newui.py
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
@export(mod_name=MOD_NAME, api=True, version=version)
async def payment_cancel(app: App, session_id: str, request_as_kwarg=True, request=None):
    """Handle cancelled payment.

    Marks the stored payment info as cancelled, notifies any SSE listener,
    and returns a small self-closing HTML page.
    """
    payment_info = getattr(app, 'payment_info', None)
    if payment_info and session_id in payment_info:
        payment_info[session_id]['status'] = 'cancelled'

        # Notify SSE clients about payment cancellation
        queues = getattr(app, 'payment_queues', None)
        if queues and session_id in queues:
            await queues[session_id].put({"status": "cancelled"})

    return Result.html(app.web_context() + """
    <div style="text-align: center; padding: 50px;">
        <h2>Payment Cancelled</h2>
        <p>Your payment was cancelled.</p>
        <script>
            setTimeout(function() {
                window.close();
            }, 3000);
        </script>
    </div>
    """)
payment_stream(app, session_id) async

SSE stream endpoint for payment status updates

Source code in toolboxv2/mods/TruthSeeker/newui.py
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
@export(mod_name=MOD_NAME, api=True, version=version)
async def payment_stream(app: App, session_id: str):
    """SSE stream endpoint for payment status updates.

    Streams `payment_update` events for the given session until the payment
    completes or is cancelled, emitting keep-alive comments on idle.
    """
    if not hasattr(app, 'payment_queues'):
        app.payment_queues = {}

    # Lazily create the per-session queue.
    queue = app.payment_queues.setdefault(session_id, asyncio.Queue())

    async def generate():
        try:
            while True:
                try:
                    payment_data = await asyncio.wait_for(queue.get(), timeout=30)
                except TimeoutError:
                    # Keep-alive comment so proxies don't drop the connection.
                    yield ":\n\n"
                    continue

                yield f"event: payment_update\ndata: {json.dumps(payment_data)}\n\n"

                # Terminal states end the stream.
                if payment_data.get('status') in ['completed', 'cancelled']:
                    break
        finally:
            # The queue is deliberately kept around for other clients.
            pass

    return Result.stream(generate())
payment_success(app, session_id, request_as_kwarg=True, request=None) async

Handle successful payment

Source code in toolboxv2/mods/TruthSeeker/newui.py
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
@export(mod_name=MOD_NAME, api=True, version=version)
async def payment_success(app: App, session_id: str, request_as_kwarg=True, request=None):
    """Handle successful payment"""
    # Stripe redirects the browser here after checkout. Without a stored
    # payment record for this session there is nothing to verify.
    if not hasattr(app, 'payment_info') or session_id not in app.payment_info:
        return Result.html(app.web_context() + """
        <div style="text-align: center; padding: 50px;">
            <h2>Payment Session Not Found</h2>
            <p>Return to the main page to continue.</p>
            <a href="/" style="display: inline-block; margin-top: 20px; padding: 10px 20px; background-color: #4F46E5; color: white; text-decoration: none; border-radius: 5px;">Return to Home</a>
        </div>
        """)

    payment_info = app.payment_info[session_id]

    try:
        # Verify the payment with Stripe
        # NOTE(review): falls back to a placeholder key — deployments must set
        # STRIPE_SECRET_KEY.
        stripe = __import__('stripe')
        stripe.api_key = os.getenv('STRIPE_SECRET_KEY', 'sk_test_YourSecretKey')

        stripe_session = stripe.checkout.Session.retrieve(payment_info['payment_id'])

        if stripe_session.payment_status == 'paid':
            payment_info['status'] = 'completed'

            # Notify SSE clients about payment completion
            if hasattr(app, 'payment_queues') and session_id in app.payment_queues:
                await app.payment_queues[session_id].put({
                    "status": "completed",
                    "amount": payment_info['amount']
                })

            # Self-closing confirmation page.
            return Result.html(app.web_context() + """
            <div style="text-align: center; padding: 50px;">
                <h2>Thank You for Your Support!</h2>
                <p>Your payment was successful. You can now close this window and continue with your research.</p>
                <script>
                    setTimeout(function() {
                        window.close();
                    }, 5000);
                </script>
            </div>
            """)
        else:
            return Result.html(app.web_context() + """
            <div style="text-align: center; padding: 50px;">
                <h2>Payment Not Completed</h2>
                <p>Your payment has not been completed. Please try again.</p>
                <button onclick="window.close()">Close Window</button>
            </div>
            """)
    except Exception as e:
        return Result.html(app.web_context() + f"""
        <div style="text-align: center; padding: 50px;">
            <h2>Error Processing Payment</h2>
            <p>There was an error processing your payment: {str(e)}</p>
            <button onclick="window.close()">Close Window</button>
        </div>
        """)
research_results(app, research_id) async

Get the results of a completed research process

Source code in toolboxv2/mods/TruthSeeker/newui.py
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
@export(mod_name=MOD_NAME, api=True, version=version)
async def research_results(app: App, research_id: str):
    """Get the results of a completed research process.

    Returns a user error if the process is unknown or still running.
    """
    processes = getattr(app, 'research_processes', None)
    if not processes or research_id not in processes:
        return Result.default_user_error(info="Research process not found")

    entry = processes[research_id]
    if entry['status'] != 'complete':
        return Result.default_user_error(info="Research is not complete")

    return Result.ok(data=entry['results'])
research_status(app, research_id) async

Get the status of a research process

Source code in toolboxv2/mods/TruthSeeker/newui.py
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
@export(mod_name=MOD_NAME, api=True, version=version)
async def research_status(app: App, research_id: str):
    """Get the status of a research process.

    Returns the process' status/progress/step/info snapshot, or a user
    error when the research id is unknown.
    """
    processes = getattr(app, 'research_processes', None)
    if not processes or research_id not in processes:
        return Result.default_user_error(info="Research process not found")

    entry = processes[research_id]
    return Result.ok(data={
        "status": entry['status'],
        "progress": entry['progress'],
        "step": entry['step'],
        "info": entry['info'],
    })
start_research(app, data) async

Start a new research process

Source code in toolboxv2/mods/TruthSeeker/newui.py
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
@export(mod_name=MOD_NAME, api=True, version=version)
async def start_research(app: App, data):
    """Start a new research process.

    Expects ``data`` with:
        query (str): the research question.
        session_id: caller session identifier (stored with the process).
        max_search (int, default 4): number of searches to issue.
        num_search_result_per_query (int, default 4): results per search.

    Registers a process record under a fresh ``research_id``, launches the
    ArXiv processing in a background task, and returns immediately with
    ``Result.ok({"research_id": ...})``.  Progress updates are pushed to the
    SSE queue in ``app.sse_queues[research_id]``.
    """
    # Get data from the request
    query = data.get("query")
    session_id = data.get("session_id")
    max_search = data.get("max_search", 4)
    num_search_result_per_query = data.get("num_search_result_per_query", 4)

    # Get the tools module
    # NOTE(review): fetches the app via get_app(...) instead of using the
    # `app` parameter — confirm this is intentional.
    tools = get_app("ArXivPDFProcessor").get_mod("isaa")
    if not hasattr(tools, 'initialized') or not tools.initialized:
        tools.init_isaa(build=True)

    # Generate a unique research_id
    research_id = str(uuid.uuid4())

    # Store the research information in a global dictionary
    if not hasattr(app, 'research_processes'):
        app.research_processes = {}

    # Initialize SSE queues if not already done
    if not hasattr(app, 'sse_queues'):
        app.sse_queues = {}

    # Create a queue for this research process
    app.sse_queues[research_id] = asyncio.Queue()

    # Create a processor with callback for status updates
    app.research_processes[research_id] = {
        'status': 'initializing',
        'progress': 0.0,
        'step': 'Initializing',
        'info': '',
        'query': query,
        'session_id': session_id,
        'processor': None,
        'results': None,
        'stop_requested': False
    }

    # Define the callback function that sends updates to the SSE queue
    def status_callback(status_data):
        if research_id in app.research_processes:
            process = app.research_processes[research_id]
            process['status'] = 'processing'
            process['progress'] = status_data.get('progress', 0.0)
            process['step'] = status_data.get('step', '')
            process['info'] = status_data.get('info', '')

            # Put the status update in the SSE queue
            status_update = {
                "status": process['status'],
                "progress": process['progress'],
                "step": process['step'],
                "info": process['info']
            }

            if research_id in app.sse_queues:
                # NOTE(review): assumes a running event loop in the calling
                # thread; asyncio.create_task raises RuntimeError otherwise.
                asyncio.create_task(app.sse_queues[research_id].put(status_update))

    # Create the processor
    processor = ArXivPDFProcessor(
        query=query,
        tools=tools,
        chunk_size=1_000_000,
        overlap=2_000,
        max_search=max_search,
        num_search_result_per_query=num_search_result_per_query,
        download_dir=f"pdfs_{research_id}",
        callback=status_callback
    )

    app.research_processes[research_id]['processor'] = processor

    # Process in the background
    async def process_in_background():
        try:
            # Check if stop was requested before starting
            if app.research_processes[research_id]['stop_requested']:
                app.research_processes[research_id]['status'] = 'stopped'
                if research_id in app.sse_queues:
                    await app.sse_queues[research_id].put({
                        "status": "stopped",
                        "progress": 0,
                        "step": "Research stopped",
                        "info": ""
                    })
                return

            # Start processing
            papers, insights = await processor.process()

            # Check if stop was requested during processing
            if app.research_processes[research_id]['stop_requested']:
                app.research_processes[research_id]['status'] = 'stopped'
                if research_id in app.sse_queues:
                    await app.sse_queues[research_id].put({
                        "status": "stopped",
                        "progress": 1,
                        "step": "Research stopped",
                        "info": ""
                    })
                return

            # Store results
            app.research_processes[research_id]['results'] = {
                'papers': papers,
                'insights': insights['response'] if insights and 'response' in insights else None
            }
            app.research_processes[research_id]['status'] = 'complete'

            # Send final status update
            if research_id in app.sse_queues:
                await app.sse_queues[research_id].put({
                    "status": "complete",
                    "progress": 1,
                    "step": "Research complete",
                    "info": f"Found {len(papers)} papers"
                })

        except Exception as e:
            app.research_processes[research_id]['status'] = 'error'
            app.research_processes[research_id]['info'] = str(e)

            # Send error status
            if research_id in app.sse_queues:
                await app.sse_queues[research_id].put({
                    "status": "error",
                    "progress": 0,
                    "step": "Error",
                    "info": str(e)
                })

            print(f"Error in research process {research_id}: {str(e)}")

    # Start the background task
    asyncio.create_task(process_in_background())

    return Result.ok(data={"research_id": research_id})
status_stream(app, research_id) async

SSE stream endpoint for research status updates

Source code in toolboxv2/mods/TruthSeeker/newui.py
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
@export(mod_name=MOD_NAME, api=True, version=version)
async def status_stream(app: App, research_id: str):
    """SSE stream endpoint for research status updates.

    Sends the current process snapshot first, then streams `status_update`
    events until the research completes, errors or is stopped, emitting
    keep-alive comments on idle.
    """
    if not hasattr(app, 'sse_queues'):
        app.sse_queues = {}

    # Lazily create the per-research queue.
    queue = app.sse_queues.setdefault(research_id, asyncio.Queue())

    async def generate():
        # Emit the current state first so late subscribers catch up.
        processes = getattr(app, 'research_processes', None)
        if processes and research_id in processes:
            process = processes[research_id]
            initial_status = {
                "status": process['status'],
                "progress": process['progress'],
                "step": process['step'],
                "info": process['info']
            }
            yield f"event: status_update\ndata: {json.dumps(initial_status)}\n\n"

        try:
            while True:
                try:
                    status_data = await asyncio.wait_for(queue.get(), timeout=30)
                except TimeoutError:
                    # Keep-alive comment so proxies don't drop the connection.
                    yield ":\n\n"
                    continue

                yield f"event: status_update\ndata: {json.dumps(status_data)}\n\n"

                # Terminal states end the stream.
                if status_data.get('status') in ['complete', 'error', 'stopped']:
                    break
        finally:
            # The queue is deliberately kept around for other clients.
            pass

    return Result.stream(generate())
stop_research(app, data) async

Stop a research process

Source code in toolboxv2/mods/TruthSeeker/newui.py
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
@export(mod_name=MOD_NAME, api=True, version=version)
async def stop_research(app: App, data):
    """Stop a research process.

    Sets the process' stop flag (checked by the background task) and
    broadcasts a "stopped" event to any connected SSE listeners.
    """
    research_id = data.get("research_id")
    processes = getattr(app, 'research_processes', None)
    if not processes or research_id not in processes:
        return Result.default_user_error(info="Research process not found")

    processes[research_id]['stop_requested'] = True

    # Send stopped status to SSE clients
    queues = getattr(app, 'sse_queues', None)
    if queues and research_id in queues:
        await queues[research_id].put({
            "status": "stopped",
            "progress": processes[research_id]['progress'],
            "step": "Stopping research",
            "info": ""
        })

    return Result.ok(data={"status": "stop_requested"})

tests

TestTruthSeeker

Bases: TestCase

Source code in toolboxv2/mods/TruthSeeker/tests.py
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
class TestTruthSeeker(unittest.TestCase):
    """Unit tests for the TruthSeeker module entry points.

    The toolbox App and incoming requests are fully mocked — no network,
    no real code-verification backend, no Stripe.
    """

    def setUp(self):
        # Mock the App class
        self.mock_app = Mock()
        self.mock_app.get_mod.return_value = Mock()

        # Setup mock for run_any that returns iterable dict
        self.mock_app.run_any.return_value = {
            "1": {"name": "template1"},
            "2": {"name": "template2"}
        }

        # Mock RequestSession
        self.mock_request = Mock()
        self.mock_request.json = AsyncMock()

    @patch('os.path.join')
    @patch('builtins.open', create=True)
    def test_start_initialization(self, mock_open, mock_join):
        """Test the start function initializes correctly"""
        # Setup mock file handling
        mock_file = Mock()
        mock_file.read.return_value = "test content"
        mock_open.return_value.__enter__.return_value = mock_file

        # Call start function
        start(self.mock_app)

        # Verify app initialization calls: both verification scopes must be set up.
        self.mock_app.get_mod.assert_called_with("CodeVerification")
        self.mock_app.run_any.assert_any_call(("CodeVerification", "init_scope"), scope="TruthSeeker")
        self.mock_app.run_any.assert_any_call(("CodeVerification", "init_scope"), scope="TruthSeeker-promo")

    @async_test
    async def test_codes_valid_request(self):
        """Test the codes function with valid input"""
        # Mock request data
        test_data = {
            "query": "test query",
            "depth": "Q",
            "promoCode": "PROMO15",
            "ontimeCode": "TEST123"
        }
        self.mock_request.json.return_value = test_data

        # Mock code verification
        self.mock_app.run_any.return_value = {
            "template_name": "Promo15",
            "usage_type": "one_time"
        }

        result = await codes(self.mock_app, self.mock_request)

        self.assertTrue(result['valid'])
        self.assertIn('ontimeKey', result)
        self.assertIn('ppc', result)

    @async_test
    async def test_codes_invalid_promo(self):
        """Test the codes function with invalid promo code"""
        test_data = {
            "query": "test query",
            "depth": "I",
            "promoCode": "INVALID",
            "ontimeCode": "TEST123"
        }
        self.mock_request.json.return_value = test_data

        # Mock invalid promo code verification
        self.mock_app.run_any.return_value = None

        result = await codes(self.mock_app, self.mock_request)

        # An invalid promo still yields a price quote (no free processing).
        self.assertIn('ppc', result)
        self.assertTrue(result['ppc']['price'] > 0)

    @async_test
    async def test_process_valid_request(self):
        """Test the process function with valid input"""
        test_data = {
            "query": "test query",
            "depth": "Q",
            "ontimeKey": "VALID_KEY",
            "email": "test@example.com"
        }
        self.mock_request.json.return_value = test_data

        # Mock valid key verification
        self.mock_app.run_any.return_value = {
            "template_name": "PROCESS",
            "usage_type": "timed",
            "uses_count": 1
        }

        # Mock ArXivPDFProcessor so no real paper processing runs.
        with patch('toolboxv2.mods.TruthSeeker.module.ArXivPDFProcessor') as mock_processor:
            mock_insights = MagicMock()
            mock_insights.is_true = "True"
            mock_insights.summary = "Test summary"
            mock_insights.key_point = "Point1>\n\n<Point2"

            mock_processor.return_value.process.return_value = ([], mock_insights)

            result = await process(self.mock_app, self.mock_request)

            self.assertEqual(result['is_true'], "True")
            self.assertEqual(result['summary'], "Test summary")

    @async_test
    async def test_process_invalid_key(self):
        """Test the process function with invalid key"""
        test_data = {
            "query": "test query",
            "depth": "Q",
            "ontimeKey": "INVALID_KEY",
            "email": "test@example.com"
        }
        self.mock_request.json.return_value = test_data

        # Mock invalid key verification
        self.mock_app.run_any.return_value = None

        result = await process(self.mock_app, self.mock_request)

        # An invalid key must short-circuit with an empty result payload.
        self.assertEqual(result['summary'], "INVALID QUERY")
        self.assertEqual(result['insights'], [])
        self.assertEqual(result['papers'], [])

    def test_byCode_functionality(self):
        """Test the byCode function"""
        test_request = Mock()
        test_request.json.return_value = ["payKey", "codeClass", "ontimeKey"]

        result = byCode(self.mock_app, test_request)

        self.assertEqual(result, {'code': 'code'})
test_byCode_functionality()

Test the byCode function

Source code in toolboxv2/mods/TruthSeeker/tests.py
337
338
339
340
341
342
343
344
def test_byCode_functionality(self):
    """Verify byCode() returns the expected code mapping for a list payload."""
    # Request whose JSON body is the three-element key list byCode expects.
    test_request = Mock()
    test_request.json.return_value = ["payKey", "codeClass", "ontimeKey"]

    result = byCode(self.mock_app, test_request)

    # byCode is expected to answer with a single 'code' entry.
    self.assertEqual(result, {'code': 'code'})
test_codes_invalid_promo() async

Test the codes function with invalid promo code

Source code in toolboxv2/mods/TruthSeeker/tests.py
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
@async_test
async def test_codes_invalid_promo(self):
    """codes() should still offer a priced option when the promo code fails verification."""
    test_data = {
        "query": "test query",
        "depth": "I",
        "promoCode": "INVALID",
        "ontimeCode": "TEST123"
    }
    self.mock_request.json.return_value = test_data

    # Mock invalid promo code verification: run_any returns None for unknown codes.
    self.mock_app.run_any.return_value = None

    result = await codes(self.mock_app, self.mock_request)

    # Without a valid promo a 'ppc' offer with a positive price is expected.
    self.assertIn('ppc', result)
    self.assertTrue(result['ppc']['price'] > 0)
test_codes_valid_request() async

Test the codes function with valid input

Source code in toolboxv2/mods/TruthSeeker/tests.py
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
@async_test
async def test_codes_valid_request(self):
    """codes() should validate a request whose promo code verifies successfully."""
    # Mock request data
    test_data = {
        "query": "test query",
        "depth": "Q",
        "promoCode": "PROMO15",
        "ontimeCode": "TEST123"
    }
    self.mock_request.json.return_value = test_data

    # Mock code verification: run_any returns template metadata for a valid code.
    self.mock_app.run_any.return_value = {
        "template_name": "Promo15",
        "usage_type": "one_time"
    }

    result = await codes(self.mock_app, self.mock_request)

    # A valid promo produces a one-time key plus a payment option block.
    self.assertTrue(result['valid'])
    self.assertIn('ontimeKey', result)
    self.assertIn('ppc', result)
test_process_invalid_key() async

Test the process function with invalid key

Source code in toolboxv2/mods/TruthSeeker/tests.py
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
@async_test
async def test_process_invalid_key(self):
    """process() should reject a request carrying an unknown one-time key."""
    test_data = {
        "query": "test query",
        "depth": "Q",
        "ontimeKey": "INVALID_KEY",
        "email": "test@example.com"
    }
    self.mock_request.json.return_value = test_data

    # Mock invalid key verification: run_any returns None for unknown keys.
    self.mock_app.run_any.return_value = None

    result = await process(self.mock_app, self.mock_request)

    # Rejection is signalled via the summary plus empty result lists.
    self.assertEqual(result['summary'], "INVALID QUERY")
    self.assertEqual(result['insights'], [])
    self.assertEqual(result['papers'], [])
test_process_valid_request() async

Test the process function with valid input

Source code in toolboxv2/mods/TruthSeeker/tests.py
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
@async_test
async def test_process_valid_request(self):
    """process() should run the pipeline and surface insight fields for a valid key."""
    test_data = {
        "query": "test query",
        "depth": "Q",
        "ontimeKey": "VALID_KEY",
        "email": "test@example.com"
    }
    self.mock_request.json.return_value = test_data

    # Mock valid key verification: run_any returns template metadata.
    self.mock_app.run_any.return_value = {
        "template_name": "PROCESS",
        "usage_type": "timed",
        "uses_count": 1
    }

    # Mock ArXivPDFProcessor so no network/PDF work happens in the test.
    with patch('toolboxv2.mods.TruthSeeker.module.ArXivPDFProcessor') as mock_processor:
        mock_insights = MagicMock()
        mock_insights.is_true = "True"
        mock_insights.summary = "Test summary"
        mock_insights.key_point = "Point1>\n\n<Point2"

        # process() is expected to unpack (papers, insights) from the processor.
        mock_processor.return_value.process.return_value = ([], mock_insights)

        result = await process(self.mock_app, self.mock_request)

        self.assertEqual(result['is_true'], "True")
        self.assertEqual(result['summary'], "Test summary")
test_start_initialization(mock_open, mock_join)

Test the start function initializes correctly

Source code in toolboxv2/mods/TruthSeeker/tests.py
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
@patch('os.path.join')
@patch('builtins.open', create=True)
def test_start_initialization(self, mock_open, mock_join):
    """start() should fetch CodeVerification and initialise both TruthSeeker scopes."""
    # Setup mock file handling so start() can "read" template content.
    mock_file = Mock()
    mock_file.read.return_value = "test content"
    mock_open.return_value.__enter__.return_value = mock_file

    # Call start function
    start(self.mock_app)

    # Verify app initialization calls: module lookup plus both scope inits.
    self.mock_app.get_mod.assert_called_with("CodeVerification")
    self.mock_app.run_any.assert_any_call(("CodeVerification", "init_scope"), scope="TruthSeeker")
    self.mock_app.run_any.assert_any_call(("CodeVerification", "init_scope"), scope="TruthSeeker-promo")
run_all_tests()

Run all test classes

Source code in toolboxv2/mods/TruthSeeker/tests.py
393
394
395
396
@default_test
def run_all_tests():
    """Run every test class in this module via the shared suite runner."""
    return run_test_suite()
run_arxiv_processor_tests(test_name=None)

Run TestArXivPDFProcessor tests

Source code in toolboxv2/mods/TruthSeeker/tests.py
380
381
382
def run_arxiv_processor_tests(test_name=None):
    """Run TestArXivPDFProcessor tests; pass `test_name` to run a single case."""
    return run_test_suite(TestArXivPDFProcessor, test_name)
run_pdf_downloader_tests(test_name=None)

Run TestRobustPDFDownloader tests

Source code in toolboxv2/mods/TruthSeeker/tests.py
375
376
377
def run_pdf_downloader_tests(test_name=None):
    """Run TestRobustPDFDownloader tests; pass `test_name` to run a single case."""
    return run_test_suite(TestRobustPDFDownloader, test_name)
run_specific_test(test_class, test_name)

Run a specific test from a test class

Source code in toolboxv2/mods/TruthSeeker/tests.py
389
390
391
def run_specific_test(test_class, test_name):
    """Run exactly one named test method from `test_class`."""
    return run_test_suite(test_class, test_name)
run_test_suite(test_class=None, test_name=None, verbosity=2)

Run specific test class or test case.

Parameters:

Name Type Description Default
test_class

The test class to run (optional)

None
test_name

Specific test method name to run (optional)

None
verbosity

Output detail level (default=2)

2

Returns:

Type Description

TestResult object

Source code in toolboxv2/mods/TruthSeeker/tests.py
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
def run_test_suite(test_class=None, test_name=None, verbosity=2):
    """
    Run specific test class or test case.

    Args:
        test_class: The test class to run (optional)
        test_name: Specific test method name to run (optional)
        verbosity: Output detail level (default=2)

    Returns:
        TestResult object
    """
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()

    if test_class and test_name:
        # Run specific test method
        suite.addTest(test_class(test_name))
    elif test_class:
        # Run all tests in the class
        suite.addTests(loader.loadTestsFromTestCase(test_class))
    else:
        # Run all tests
        suite.addTests(loader.loadTestsFromModule(sys.modules[__name__]))

    runner = unittest.TextTestRunner(verbosity=verbosity)
    return runner.run(suite)
run_truth_seeker_tests(test_name=None)

Run TestTruthSeeker tests

Source code in toolboxv2/mods/TruthSeeker/tests.py
384
385
386
def run_truth_seeker_tests(test_name=None):
    """Run TestTruthSeeker tests; pass `test_name` to run a single case."""
    return run_test_suite(TestTruthSeeker, test_name)

Run only ArXiv search tests

Source code in toolboxv2/mods/TruthSeeker/tests.py
414
415
416
417
418
419
420
@default_test
def test_arxiv_search():
    """Run only the ArXiv search-and-process test case."""
    return run_specific_test(
        TestArXivPDFProcessor,
        'test_search_and_process_papers'
    )
test_pdf_download()

Run only PDF download tests

Source code in toolboxv2/mods/TruthSeeker/tests.py
398
399
400
401
402
403
404
@default_test
def test_pdf_download():
    """Run only the PDF download success test case."""
    return run_specific_test(
        TestRobustPDFDownloader,
        'test_download_pdf_success'
    )
test_truth_seeker()

Run only TruthSeeker tests

Source code in toolboxv2/mods/TruthSeeker/tests.py
406
407
408
409
410
411
412
@default_test
def test_truth_seeker():
    """Run only the TruthSeeker success test case."""
    return run_specific_test(
        TestTruthSeeker,
        'test_truth_seeker_success'
    )

UltimateTTT

UltimateTTTGameEngine

Source code in toolboxv2/mods/UltimateTTT.py
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
class UltimateTTTGameEngine:  # Renamed for clarity
    """Rule engine for Ultimate Tic-Tac-Toe.

    Operates on a shared ``GameState``: validates and applies moves, derives
    local- and global-board winners, and manages the player join /
    disconnect / reconnect life cycle. All mutations happen in place on
    ``self.gs``; errors are reported via ``gs.last_error_message``.
    """

    def __init__(self, game_state: GameState):
        self.gs = game_state
        self.size = game_state.config.grid_size

    def _check_line_for_win(self, line: list[CellState | BoardWinner],
                            symbol_to_check: CellState | BoardWinner) -> bool:
        """Return True if the non-empty `line` consists solely of `symbol_to_check`."""
        if not line or line[0] == CellState.EMPTY or line[0] == BoardWinner.NONE:
            return False
        return all(cell == symbol_to_check for cell in line)

    def _get_board_winner_symbol(self, board: list[list[CellState | BoardWinner]],
                                 symbol_class: type[CellState] | type[BoardWinner]) -> CellState | BoardWinner | None:
        """Scan rows, columns and both diagonals of `board` for a winning X or O symbol."""
        symbols_to_try = [symbol_class.X, symbol_class.O]
        for symbol in symbols_to_try:
            # Rows
            for r in range(self.size):
                if self._check_line_for_win([board[r][c] for c in range(self.size)], symbol): return symbol
            # Columns
            for c in range(self.size):
                if self._check_line_for_win([board[r][c] for r in range(self.size)], symbol): return symbol
            # Diagonals
            if self._check_line_for_win([board[i][i] for i in range(self.size)], symbol): return symbol
            if self._check_line_for_win([board[i][self.size - 1 - i] for i in range(self.size)], symbol): return symbol
        return None  # No winner

    def _is_board_full(self, board: list[list[CellState | BoardWinner]],
                       empty_value: CellState | BoardWinner) -> bool:
        """Return True if no cell of `board` still holds `empty_value`."""
        return all(cell != empty_value for row in board for cell in row)

    def _determine_local_board_result(self, global_r: int, global_c: int) -> BoardWinner:
        """Compute the result of local board (global_r, global_c): X/O win, DRAW, or NONE."""
        # A board that was already decided keeps its recorded result.
        if self.gs.global_board_winners[global_r][global_c] != BoardWinner.NONE:
            return self.gs.global_board_winners[global_r][global_c]

        local_board_cells = self.gs.local_boards_state[global_r][global_c]
        winner_symbol = self._get_board_winner_symbol(local_board_cells, CellState)
        if winner_symbol:
            return BoardWinner(winner_symbol.value)  # Convert CellState.X to BoardWinner.X
        if self._is_board_full(local_board_cells, CellState.EMPTY):
            return BoardWinner.DRAW
        return BoardWinner.NONE

    def _update_local_winner_and_check_global(self, global_r: int, global_c: int):
        """Record a newly decided local board and re-evaluate the overall game state."""
        new_local_winner = self._determine_local_board_result(global_r, global_c)
        if new_local_winner != BoardWinner.NONE and self.gs.global_board_winners[global_r][
            global_c] == BoardWinner.NONE:
            self.gs.global_board_winners[global_r][global_c] = new_local_winner
            self._check_for_overall_game_end()

    def _check_for_overall_game_end(self):
        """Finish the game if the global board has a winner or is completely full (draw)."""
        if self.gs.status == GameStatus.FINISHED: return

        winner_board_symbol = self._get_board_winner_symbol(self.gs.global_board_winners, BoardWinner)
        if winner_board_symbol:  # This is BoardWinner.X or BoardWinner.O
            self.gs.overall_winner_symbol = PlayerSymbol(winner_board_symbol.value)  # Convert to PlayerSymbol
            self.gs.status = GameStatus.FINISHED
            return

        if self._is_board_full(self.gs.global_board_winners, BoardWinner.NONE):
            self.gs.is_draw = True
            self.gs.status = GameStatus.FINISHED

    def _determine_next_forced_board(self, last_move_local_r: int, last_move_local_c: int) -> tuple[int, int] | None:
        """Return the global board the next player is sent to, or None for free choice."""
        # The local cell just played maps onto the global board of the same coordinates.
        target_gr, target_gc = last_move_local_r, last_move_local_c

        if self.gs.global_board_winners[target_gr][target_gc] == BoardWinner.NONE and \
            not self._is_local_board_full(self.gs.local_boards_state[target_gr][target_gc], CellState.EMPTY):
            return (target_gr, target_gc)
        return None  # Play anywhere valid

    def _is_local_board_full(self, local_board_cells: list[list[CellState]], cell_type=CellState.EMPTY) -> bool:
        """Checks if a specific local board (passed as a 2D list of CellState) is full."""
        for r in range(self.size):
            for c in range(self.size):
                if local_board_cells[r][c] == cell_type:
                    return False
        return True

    def add_player(self, player_id: str, player_name: str,
                   is_npc: bool = False, npc_difficulty: NPCDifficulty | None = None) -> bool:
        """Add (or reconnect) a player; start the game once two connected players exist.

        Returns True on success, False with ``gs.last_error_message`` set otherwise.
        """
        if len(self.gs.players) >= 2:
            self.gs.last_error_message = "Game is already full (2 players max)."
            return False

        # Reconnect logic for existing player (human or NPC if that makes sense)
        existing_player = self.gs.get_player_info(player_id)
        if existing_player:
            if not existing_player.is_connected:
                existing_player.is_connected = True
                # If NPC "reconnects", ensure its properties are correct (though unlikely scenario for NPC)
                if is_npc:
                    existing_player.is_npc = True
                    existing_player.npc_difficulty = npc_difficulty
                    existing_player.name = player_name  # Update name if it changed for NPC

                self.gs.last_error_message = None
                self.gs.updated_at = datetime.now(UTC)

                if len(self.gs.players) == 2 and all(p.is_connected for p in self.gs.players) and \
                    self.gs.status == GameStatus.WAITING_FOR_OPPONENT:  # Should not be waiting if NPC is P2
                    self.gs.status = GameStatus.IN_PROGRESS
                    player_x_info = next(p for p in self.gs.players if p.symbol == PlayerSymbol.X)
                    self.gs.current_player_id = player_x_info.id
                    self.gs.waiting_since = None
                return True
            else:  # Player ID exists and is already connected
                self.gs.last_error_message = f"Player with ID {player_id} is already in the game and connected."
                return False

        # Adding a new player
        symbol = PlayerSymbol.X if not self.gs.players else PlayerSymbol.O

        # Construct PlayerInfo with NPC details if applicable
        player_info_data = {
            "id": player_id,
            "symbol": symbol,
            "name": player_name,
            "is_connected": True,  # NPCs are always "connected"
            "is_npc": is_npc
        }
        if is_npc and npc_difficulty:
            player_info_data["npc_difficulty"] = npc_difficulty

        new_player = PlayerInfo(**player_info_data)
        self.gs.players.append(new_player)
        self.gs.last_error_message = None

        if len(self.gs.players) == 1:  # First player added
            if self.gs.mode == GameMode.ONLINE:
                self.gs.status = GameStatus.WAITING_FOR_OPPONENT
                self.gs.current_player_id = player_id
                self.gs.waiting_since = datetime.now(UTC)
            # For local mode with P1, we wait for P2 (human or NPC) to be added
            # No status change yet, current_player_id not set until P2 joins

        elif len(self.gs.players) == 2:  # Both players now present
            self.gs.status = GameStatus.IN_PROGRESS
            player_x_info = next(p for p in self.gs.players if p.symbol == PlayerSymbol.X)
            self.gs.current_player_id = player_x_info.id  # X always starts
            self.gs.next_forced_global_board = None
            self.gs.waiting_since = None

            # If the second player added is an NPC and it's their turn (e.g. P1 is human, P2 is NPC, P1 made a move)
            # This specific logic is more for when make_move hands over to an NPC.
            # Here, we just set up the game. X (P1) will make the first move.

        self.gs.updated_at = datetime.now(UTC)
        return True

    def make_move(self, move: Move) -> bool:
        """Validate and apply `move`, update winners, switch turns, and set the forced board.

        Returns True if the move was applied; False with ``gs.last_error_message``
        set when the move is illegal.
        """
        self.gs.last_error_message = None

        if self.gs.status != GameStatus.IN_PROGRESS:
            self.gs.last_error_message = "Game is not in progress."
            return False

        player_info = self.gs.get_player_info(move.player_id)
        if not player_info or move.player_id != self.gs.current_player_id:
            self.gs.last_error_message = "Not your turn or invalid player."
            return False

        s = self.size
        if not (0 <= move.global_row < s and 0 <= move.global_col < s and \
                0 <= move.local_row < s and 0 <= move.local_col < s):
            self.gs.last_error_message = f"Coordinates out of bounds for {s}x{s} grid."
            return False

        gr, gc, lr, lc = move.global_row, move.global_col, move.local_row, move.local_col

        if self.gs.next_forced_global_board and (gr, gc) != self.gs.next_forced_global_board:
            self.gs.last_error_message = f"Must play in global board {self.gs.next_forced_global_board}."
            return False

        if self.gs.global_board_winners[gr][gc] != BoardWinner.NONE:
            self.gs.last_error_message = f"Local board ({gr},{gc}) is already decided."
            return False
        if self.gs.local_boards_state[gr][gc][lr][lc] != CellState.EMPTY:
            # Fixed: a dead assignment of a misleading "is already empty" message
            # previously preceded this line; only the correct message remains.
            self.gs.last_error_message = f"Cell ({gr},{gc})-({lr},{lc}) is already occupied."
            return False

        self.gs.local_boards_state[gr][gc][lr][lc] = CellState(player_info.symbol.value)
        self.gs.moves_history.append(move)

        self._update_local_winner_and_check_global(gr, gc)

        if self.gs.status == GameStatus.FINISHED:
            self.gs.next_forced_global_board = None
        else:
            opponent_info = self.gs.get_opponent_info(self.gs.current_player_id)
            self.gs.current_player_id = opponent_info.id
            self.gs.next_forced_global_board = self._determine_next_forced_board(lr, lc)

            if self.gs.next_forced_global_board is None:
                # Free choice: verify at least one playable cell exists anywhere,
                # otherwise the game ends (as a draw if no winner emerged).
                is_any_move_possible = any(
                    self.gs.global_board_winners[r_idx][c_idx] == BoardWinner.NONE and \
                    not self._is_local_board_full(self.gs.local_boards_state[r_idx][c_idx], CellState.EMPTY)
                    for r_idx in range(s) for c_idx in range(s)
                )
                if not is_any_move_possible:
                    self._check_for_overall_game_end()
                    if self.gs.status != GameStatus.FINISHED:
                        self.gs.is_draw = True
                        self.gs.status = GameStatus.FINISHED

        self.gs.updated_at = datetime.now(UTC)
        self.gs.last_made_move_coords = (move.global_row, move.global_col, move.local_row, move.local_col)

        return True

    def handle_player_disconnect(self, player_id: str):
        """Mark a player disconnected and pause/abort the online game accordingly."""
        player = self.gs.get_player_info(player_id)
        app = get_app(GAME_NAME)  # Fetch the app instance
        if player:
            if not player.is_connected:  # Already marked as disconnected
                app.logger.info(f"Player {player_id} was already marked as disconnected from game {self.gs.game_id}.")
                return

            player.is_connected = False
            self.gs.updated_at = datetime.now(UTC)
            app.logger.info(f"Player {player_id} disconnected from game {self.gs.game_id}. Name: {player.name}")

            if self.gs.mode == GameMode.ONLINE:
                if self.gs.status == GameStatus.IN_PROGRESS:
                    opponent = self.gs.get_opponent_info(player_id)
                    if opponent and opponent.is_connected:
                        self.gs.status = GameStatus.ABORTED  # Use ABORTED as "paused"
                        self.gs.player_who_paused = player_id  # Store who disconnected
                        # This message is for the game state, will be seen by the other player via SSE
                        self.gs.last_error_message = f"Player {player.name} disconnected. Waiting for them to rejoin."
                        app.logger.info(
                            f"Game {self.gs.game_id} PAUSED, waiting for {player.name} ({player_id}) to reconnect.")
                    else:
                        # Opponent also disconnected or was already gone
                        self.gs.status = GameStatus.ABORTED
                        self.gs.last_error_message = "Both players disconnected. Game aborted."
                        self.gs.player_who_paused = None  # No specific player to wait for
                        app.logger.info(
                            f"Game {self.gs.game_id} ABORTED, both players (or last active player) disconnected.")
                elif self.gs.status == GameStatus.WAITING_FOR_OPPONENT:
                    # If the creator (P1) disconnects while waiting for P2
                    if len(self.gs.players) == 1 and self.gs.players[0].id == player_id:
                        self.gs.status = GameStatus.ABORTED
                        self.gs.last_error_message = "Game creator disconnected before opponent joined. Game aborted."
                        self.gs.player_who_paused = None
                        app.logger.info(
                            f"Game {self.gs.game_id} ABORTED, creator {player.name} ({player_id}) disconnected while WAITING_FOR_OPPONENT.")
                elif self.gs.status == GameStatus.ABORTED and self.gs.player_who_paused:
                    # Game was already paused (e.g. P1 disconnected), and now P2 (the waiting one) disconnects
                    if self.gs.player_who_paused != player_id:  # Ensure it's the other player
                        self.gs.last_error_message = "Other player also disconnected during pause. Game aborted."
                        self.gs.player_who_paused = None  # No one specific to wait for now
                        app.logger.info(
                            f"Game {self.gs.game_id} ABORTED, waiting player {player.name} ({player_id}) disconnected.")

    def handle_player_reconnect(self, player_id: str) -> bool:
        """Mark a player reconnected and resume/revive the game where possible.

        Returns False only for an unknown player id; otherwise True, with the
        resulting state described in ``gs.last_error_message``.
        """
        player = self.gs.get_player_info(player_id)
        app = get_app(GAME_NAME)
        if not player:
            app.logger.warning(f"Reconnect attempt for unknown player {player_id} in game {self.gs.game_id}.")
            return False

        if player.is_connected:
            app.logger.info(
                f"Player {player.name} ({player_id}) attempted reconnect but was already marked as connected to game {self.gs.game_id}.")
            if self.gs.status == GameStatus.ABORTED and self.gs.player_who_paused == player_id:
                opponent = self.gs.get_opponent_info(player_id)
                if opponent and opponent.is_connected:
                    self.gs.status = GameStatus.IN_PROGRESS
                    self.gs.last_error_message = f"Connection for {player.name} re-established. Game resumed."
                    self.gs.player_who_paused = None
                    self.gs.updated_at = datetime.now(UTC)
                    app.logger.info(
                        f"Game {self.gs.game_id} resumed as already-connected pauser {player.name} re-interacted.")
                else:
                    self.gs.last_error_message = f"Welcome back, {player.name}! Your opponent is still not connected."
            return True

        player.is_connected = True
        self.gs.updated_at = datetime.now(UTC)
        app.logger.info(
            f"Player {player.name} ({player_id}) reconnected to game {self.gs.game_id}. Previous status: {self.gs.status}, Paused by: {self.gs.player_who_paused}")

        if self.gs.status == GameStatus.ABORTED:
            if self.gs.player_who_paused == player_id:  # The player who caused the pause has reconnected
                opponent = self.gs.get_opponent_info(player_id)
                if opponent and opponent.is_connected:
                    self.gs.status = GameStatus.IN_PROGRESS
                    self.gs.last_error_message = f"Player {player.name} reconnected. Game resumed!"
                    self.gs.player_who_paused = None
                    app.logger.info(
                        f"Game {self.gs.game_id} RESUMED. Pauser {player.name} reconnected, opponent {opponent.name} is present.")
                else:  # Pauser reconnected, opponent (still) gone or never joined (if P1 disconnected from WAITING)
                    if not opponent and len(
                        self.gs.players) == 1:  # P1 reconnected to a game they created but no P2 yet
                        self.gs.status = GameStatus.WAITING_FOR_OPPONENT
                        self.gs.player_who_paused = None
                        self.gs.current_player_id = player_id
                        self.gs.last_error_message = f"Creator {player.name} reconnected. Waiting for opponent."
                        self.gs.waiting_since = datetime.now(UTC)  # Reset waiting timer
                    elif opponent:  # Opponent was there but is now disconnected
                        self.gs.player_who_paused = opponent.id  # Now waiting for the other person
                        self.gs.last_error_message = f"Welcome back, {player.name}! Your opponent ({opponent.name}) is not connected. Game remains paused."
                        app.logger.info(
                            f"Game {self.gs.game_id} still PAUSED. {player.name} reconnected, but opponent {opponent.name} is NOT. Waiting for {opponent.name}.")
                    else:  # Should be rare: 2 players in list, but opponent object not found for P1
                        self.gs.last_error_message = f"Welcome back, {player.name}! Opponent details unclear. Game remains paused."


            elif self.gs.player_who_paused and self.gs.player_who_paused != player_id:
                # The *other* player reconnected, while game was paused for initial pauser.
                initial_pauser_info = self.gs.get_player_info(self.gs.player_who_paused)
                if initial_pauser_info and initial_pauser_info.is_connected:  # This implies both are now connected.
                    self.gs.status = GameStatus.IN_PROGRESS
                    self.gs.last_error_message = "Both players are now connected. Game resumed!"
                    self.gs.player_who_paused = None
                    app.logger.info(
                        f"Game {self.gs.game_id} RESUMED. Waiting player {player.name} reconnected, initial pauser {initial_pauser_info.name} also present.")
                else:
                    self.gs.last_error_message = f"Welcome back, {player.name}! Still waiting for {initial_pauser_info.name if initial_pauser_info else 'the other player'} to reconnect."
                    app.logger.info(
                        f"Game {self.gs.game_id} still PAUSED. Player {player.name} reconnected, but still waiting for original pauser {self.gs.player_who_paused}.")

            else:  # game is ABORTED but no specific player_who_paused (hard abort by timeout or both disconnected)
                if len(self.gs.players) == 2:  # Was a two-player game
                    opponent = self.gs.get_opponent_info(player_id)
                    if opponent:
                        # Revive the game to a paused state, waiting for the other player
                        self.gs.player_who_paused = opponent.id
                        self.gs.status = GameStatus.ABORTED  # Remains aborted, but now specifically for opponent
                        self.gs.last_error_message = f"Welcome back, {player.name}! Game was fully aborted. Now waiting for {opponent.name} to rejoin."
                        app.logger.info(
                            f"Game {self.gs.game_id} REVIVED from HARD ABORT by {player.name}. Now paused, waiting for {opponent.name} ({opponent.id}).")
                    else:  # Should not happen if two players were in game and player_id is one of them
                        self.gs.last_error_message = f"Player {player.name} reconnected, but game state is inconsistent (opponent not found)."
                        app.logger.warning(
                            f"Game {self.gs.game_id} HARD ABORT revival by {player.name} failed, opponent info missing.")
                elif len(self.gs.players) == 1 and self.gs.players[0].id == player_id:
                    # P1 created, P1 disconnected, game WAITING_FOR_OPPONENT timed out & hard aborted. P1 tries to rejoin.
                    self.gs.status = GameStatus.WAITING_FOR_OPPONENT
                    self.gs.player_who_paused = None
                    self.gs.current_player_id = player_id
                    self.gs.last_error_message = f"Creator {player.name} reconnected. Waiting for opponent."
                    self.gs.waiting_since = datetime.now(UTC)  # Reset waiting timer
                    app.logger.info(
                        f"Game {self.gs.game_id} (previously hard aborted while waiting) revived by creator {player.name}. Now WAITING_FOR_OPPONENT.")
                else:
                    self.gs.last_error_message = f"Player {player.name} reconnected, but the game was aborted and cannot be revived in its current player configuration."
                    app.logger.info(
                        f"Game {self.gs.game_id} HARD ABORTED. Player {player.name} reconnected, but game cannot resume in current configuration.")


        elif self.gs.status == GameStatus.IN_PROGRESS:
            opponent = self.gs.get_opponent_info(player_id)
            if not opponent or not opponent.is_connected:
                self.gs.status = GameStatus.ABORTED
                self.gs.player_who_paused = opponent.id if opponent else None
                self.gs.last_error_message = f"Welcome back, {player.name}! Your opponent disconnected while you were away. Waiting for them."
                app.logger.info(
                    f"Game {self.gs.game_id} transitions to PAUSED. {player.name} reconnected to IN_PROGRESS, but opponent {opponent.id if opponent else 'N/A'} is gone.")
            else:
                self.gs.last_error_message = f"Player {player.name} re-established connection during active game."
                app.logger.info(
                    f"Player {player.name} ({player_id}) re-established connection to IN_PROGRESS game {self.gs.game_id}.")

        elif self.gs.status == GameStatus.WAITING_FOR_OPPONENT:
            if len(self.gs.players) == 1 and self.gs.players[0].id == player_id:
                self.gs.last_error_message = f"Creator {player.name} reconnected. Still waiting for opponent."
                self.gs.current_player_id = player_id
                self.gs.waiting_since = datetime.now(UTC)  # Reset waiting timer
                app.logger.info(
                    f"Creator {player.name} ({player_id}) reconnected to WAITING_FOR_OPPONENT game {self.gs.game_id}.")
            else:
                app.logger.warning(
                    f"Non-creator {player.name} or unexpected player count for reconnect to WAITING_FOR_OPPONENT game {self.gs.game_id}.")

        return True

WebSocketManager

Tools

Bases: MainTool

Production-ready WebSocketManager Tool.

Source code in toolboxv2/mods/WebSocketManager.py
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
class Tools(MainTool):
    """Production-ready WebSocketManager Tool.

    Facade over three resource kinds:
      * one optional ``WebSocketServer`` (created lazily),
      * named ``WebSocketClient`` instances keyed by client id,
      * standalone ``WebSocketPool`` groups keyed by pool id.
    """

    def __init__(self, app=None):
        # Identity metadata consumed by the MainTool framework.
        self.version = "2.0.0"
        self.name = "WebSocketManager"
        self.color = "BLUE"

        if app is None:
            app = get_app()
        # Prefer the app-provided logger; fall back to a named stdlib logger.
        self.logger = app.logger if app else logging.getLogger(self.name)

        # Core components
        self.server: Optional[WebSocketServer] = None
        self.clients: Dict[str, WebSocketClient] = {}
        self.pools: Dict[str, WebSocketPool] = {}

        # Tools interface: "all" is the human-readable command catalog,
        # the remaining keys bind command names to methods.
        self.tools = {
            "all": [
                ["version", "Show version"],
                ["create_server", "Create WebSocket server"],
                ["create_client", "Create WebSocket client"],
                ["create_pool", "Create connection pool"],
                ["list_pools", "List all pools"],
                ["get_stats", "Get connection statistics"],
                ["health_check", "Perform health check"]
            ],
            "name": self.name,
            "version": self.show_version,
            # NOTE(review): "create_server" appears in the "all" catalog above
            # but its binding here is commented out — confirm intentional.
            #"create_server": self.create_server,
            "create_client": self.create_client,
            "create_pool": self.create_pool,
            "list_pools": self.list_pools,
            "get_stats": self.get_statistics,
            "health_check": self.health_check
        }

        # Register lifecycle callbacks with the framework base class.
        MainTool.__init__(self, load=self.on_start, v=self.version,
                          tool=self.tools, name=self.name,
                          logs=self.logger, color=self.color,
                          on_exit=self.on_exit)

    def on_start(self):
        """Lifecycle hook: called by MainTool when the module loads."""
        self.logger.info("🚀 WebSocketManager started")

    async def on_exit(self):
        """Lifecycle hook: stop the server and disconnect every client."""
        self.logger.info("🔄 Shutting down WebSocketManager")

        # Stop server
        if self.server:
            await self.server.stop()

        # Disconnect all clients
        for client in self.clients.values():
            await client.disconnect()

        self.logger.info("✅ WebSocketManager shutdown complete")

    def show_version(self):
        """Return the tool's version string."""
        return self.version

    async def create_server(self, host: str = "localhost", port: int = 8765,
                            non_blocking: bool = False) -> WebSocketServer:
        """Create and start the singleton WebSocket server.

        Returns the (possibly pre-existing) server instance.
        NOTE(review): ``non_blocking=None`` or a host containing 'test'
        returns ``None`` early — presumably test shims; confirm.
        """
        if non_blocking is None:
            return
        if 'test' in host:
            return
        if self.server is None:
            self.server = WebSocketServer(host, port)
            await self.server.start(non_blocking)
        return self.server

    def create_client(self, client_id: str) -> WebSocketClient:
        """Return the client registered under *client_id*, creating it on first use."""
        if client_id not in self.clients:
            self.clients[client_id] = WebSocketClient(client_id, self.logger)
        return self.clients[client_id]

    def create_pool(self, pool_id: str) -> WebSocketPool:
        """Return the standalone pool for *pool_id*, creating it on first use."""
        if pool_id not in self.pools:
            self.pools[pool_id] = WebSocketPool(pool_id)
        return self.pools[pool_id]

    def list_pools(self) -> Dict[str, Dict[str, Any]]:
        """List all connection pools with stats.

        Server-owned pools are namespaced as ``server.<pool_id>``;
        standalone pools keep their plain id.
        """
        pools_info = {}

        # Server pools
        if self.server:
            for pool_id, pool in self.server.pools.items():
                pools_info[f"server.{pool_id}"] = {
                    "type": "server_pool",
                    "connections": pool.get_connection_count(),
                    "connection_ids": pool.get_connection_ids()
                }

        # Standalone pools
        for pool_id, pool in self.pools.items():
            pools_info[pool_id] = {
                "type": "standalone_pool",
                "connections": pool.get_connection_count(),
                "connection_ids": pool.get_connection_ids()
            }

        return pools_info

    def get_statistics(self) -> Dict[str, Any]:
        """Return aggregated server / client / pool statistics."""
        stats = {
            "server": {
                "running": self.server is not None,
                "pools": len(self.server.pools) if self.server else 0,
                "total_connections": sum(
                    pool.get_connection_count()
                    for pool in (self.server.pools.values() if self.server else [])
                )
            },
            "clients": {
                "total": len(self.clients),
                "connected": sum(
                    1 for client in self.clients.values()
                    if client.state == ConnectionState.CONNECTED
                ),
                # Histogram of clients per ConnectionState value.
                "states": {
                    state.value: sum(
                        1 for client in self.clients.values()
                        if client.state == state
                    ) for state in ConnectionState
                }
            },
            "pools": {
                "standalone": len(self.pools),
                "total_connections": sum(
                    pool.get_connection_count()
                    for pool in self.pools.values()
                )
            }
        }
        return stats

    async def health_check(self) -> Dict[str, Any]:
        """Check the server flag and each client's socket; report issues.

        ``overall`` is "degraded" when any issue is recorded, else "healthy".
        """
        health = {
            "overall": "healthy",
            "server": "not_running" if not self.server else "running",
            "clients": {},
            "issues": []
        }

        # Check clients
        for client_id, client in self.clients.items():
            if client.state == ConnectionState.CONNECTED:
                # Perform actual health check if possible
                try:
                    # NOTE(review): relies on a ``closed`` attribute on the
                    # underlying websocket object — confirm for the library
                    # version in use.
                    if client.ws and not client.ws.closed:
                        health["clients"][client_id] = "healthy"
                    else:
                        health["clients"][client_id] = "unhealthy"
                        health["issues"].append(f"Client {client_id} connection closed")
                except Exception as e:
                    health["clients"][client_id] = "error"
                    health["issues"].append(f"Client {client_id}: {str(e)}")
            else:
                # Not connected: report the raw state name instead.
                health["clients"][client_id] = client.state.value

        if health["issues"]:
            health["overall"] = "degraded"

        return health

    # Utility methods for easy access
    def get_server_pool(self, pool_id: str) -> Optional[WebSocketPool]:
        """Fetch a pool owned by the running server; None when no server exists."""
        return self.server.get_pool(pool_id) if self.server else None

    def get_client(self, client_id: str) -> Optional[WebSocketClient]:
        """Look up a previously created client; None if unknown."""
        return self.clients.get(client_id)

    async def broadcast_to_pool(self, pool_id: str, event: str, data: Dict[str, Any]) -> int:
        """Broadcast message to all connections in a pool.

        Server-owned pools take precedence over standalone pools; returns
        the number of connections reached (0 when no pool matches).
        """
        message = WebSocketMessage(event=event, data=data).to_json()

        # Try server pool first
        if self.server:
            pool = self.server.get_pool(pool_id)
            if pool:
                return await pool.broadcast(message)

        # Try standalone pool
        pool = self.pools.get(pool_id)
        if pool:
            return await pool.broadcast(message)

        return 0
broadcast_to_pool(pool_id, event, data) async

Broadcast message to all connections in a pool.

Source code in toolboxv2/mods/WebSocketManager.py
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
async def broadcast_to_pool(self, pool_id: str, event: str, data: Dict[str, Any]) -> int:
    """Broadcast an event payload to every connection in the named pool.

    Server-owned pools take precedence over standalone pools. Returns the
    number of connections the message was delivered to (0 if no pool matched).
    """
    payload = WebSocketMessage(event=event, data=data).to_json()

    # Resolve the target pool: server-side first, then standalone.
    target = self.server.get_pool(pool_id) if self.server else None
    if target is None:
        target = self.pools.get(pool_id)

    return await target.broadcast(payload) if target else 0
create_client(client_id)

Create a WebSocket client.

Source code in toolboxv2/mods/WebSocketManager.py
469
470
471
472
473
def create_client(self, client_id: str) -> WebSocketClient:
    """Return the client registered under *client_id*, creating it on first use."""
    client = self.clients.get(client_id)
    if client is None:
        # First request for this id: build and cache a fresh client.
        client = WebSocketClient(client_id, self.logger)
        self.clients[client_id] = client
    return client
create_pool(pool_id)

Create a standalone connection pool.

Source code in toolboxv2/mods/WebSocketManager.py
475
476
477
478
479
def create_pool(self, pool_id: str) -> WebSocketPool:
    """Return the standalone pool for *pool_id*, creating it on first use."""
    existing = self.pools.get(pool_id)
    if existing is not None:
        return existing
    # Lazily construct and register the pool.
    pool = WebSocketPool(pool_id)
    self.pools[pool_id] = pool
    return pool
create_server(host='localhost', port=8765, non_blocking=False) async

Create and start a WebSocket server.

Source code in toolboxv2/mods/WebSocketManager.py
457
458
459
460
461
462
463
464
465
466
467
async def create_server(self, host: str = "localhost", port: int = 8765,
                        non_blocking: bool = False) -> WebSocketServer:
    """Create and start the singleton WebSocket server, returning it.

    NOTE(review): ``non_blocking=None`` or a host containing 'test'
    short-circuits to ``None`` — presumably test shims; confirm.
    """
    if non_blocking is None or 'test' in host:
        return
    if self.server is None:
        # First call: build and start the server; later calls reuse it.
        self.server = WebSocketServer(host, port)
        await self.server.start(non_blocking)
    return self.server
get_client(client_id)

Get a client by ID.

Source code in toolboxv2/mods/WebSocketManager.py
573
574
575
def get_client(self, client_id: str) -> Optional[WebSocketClient]:
    """Look up a previously created client; None if unknown."""
    try:
        return self.clients[client_id]
    except KeyError:
        return None
get_server_pool(pool_id)

Get a server pool by ID.

Source code in toolboxv2/mods/WebSocketManager.py
569
570
571
def get_server_pool(self, pool_id: str) -> Optional[WebSocketPool]:
    """Fetch a pool owned by the running server; None when no server exists."""
    if not self.server:
        return None
    return self.server.get_pool(pool_id)
get_statistics()

Get comprehensive statistics.

Source code in toolboxv2/mods/WebSocketManager.py
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
def get_statistics(self) -> Dict[str, Any]:
    """Return aggregated server / client / pool statistics as one dict."""
    # Snapshot collections once so each section counts the same data.
    server_pools = list(self.server.pools.values()) if self.server else []
    client_list = list(self.clients.values())

    # Histogram of clients per ConnectionState value.
    state_counts = {
        state.value: sum(1 for c in client_list if c.state == state)
        for state in ConnectionState
    }

    return {
        "server": {
            "running": self.server is not None,
            "pools": len(server_pools),
            "total_connections": sum(
                p.get_connection_count() for p in server_pools
            ),
        },
        "clients": {
            "total": len(client_list),
            "connected": sum(
                1 for c in client_list
                if c.state == ConnectionState.CONNECTED
            ),
            "states": state_counts,
        },
        "pools": {
            "standalone": len(self.pools),
            "total_connections": sum(
                p.get_connection_count() for p in self.pools.values()
            ),
        },
    }
health_check() async

Perform comprehensive health check.

Source code in toolboxv2/mods/WebSocketManager.py
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
async def health_check(self) -> Dict[str, Any]:
    """Check the server flag and each client's socket; report issues.

    ``overall`` is "degraded" when any issue was recorded, else "healthy".
    """
    clients_report: Dict[str, Any] = {}
    issues: List[str] = []

    for cid, client in self.clients.items():
        if client.state != ConnectionState.CONNECTED:
            # Not connected: report the raw state name instead.
            clients_report[cid] = client.state.value
            continue
        try:
            # NOTE(review): relies on a ``closed`` attribute on the socket
            # object — confirm for the websocket library version in use.
            if client.ws and not client.ws.closed:
                clients_report[cid] = "healthy"
            else:
                clients_report[cid] = "unhealthy"
                issues.append(f"Client {cid} connection closed")
        except Exception as e:
            clients_report[cid] = "error"
            issues.append(f"Client {cid}: {str(e)}")

    return {
        "overall": "degraded" if issues else "healthy",
        "server": "running" if self.server else "not_running",
        "clients": clients_report,
        "issues": issues,
    }
list_pools()

List all connection pools with stats.

Source code in toolboxv2/mods/WebSocketManager.py
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
def list_pools(self) -> Dict[str, Dict[str, Any]]:
    """Collect a stats record for every pool (server-owned and standalone).

    Server-owned pools are namespaced as ``server.<pool_id>``;
    standalone pools keep their plain id.
    """
    def describe(pool, kind: str) -> Dict[str, Any]:
        # One uniform stats record per pool.
        return {
            "type": kind,
            "connections": pool.get_connection_count(),
            "connection_ids": pool.get_connection_ids(),
        }

    info: Dict[str, Dict[str, Any]] = {}

    if self.server:
        for pid, pool in self.server.pools.items():
            info[f"server.{pid}"] = describe(pool, "server_pool")

    for pid, pool in self.pools.items():
        info[pid] = describe(pool, "standalone_pool")

    return info
on_exit() async

Cleanup on exit.

Source code in toolboxv2/mods/WebSocketManager.py
439
440
441
442
443
444
445
446
447
448
449
450
451
async def on_exit(self):
    """Shut down the manager: stop the server, then drop every client."""
    log = self.logger
    log.info("🔄 Shutting down WebSocketManager")

    # Stop the server first so no new connections arrive mid-teardown.
    server = self.server
    if server:
        await server.stop()

    # Then disconnect each tracked client.
    for ws_client in list(self.clients.values()):
        await ws_client.disconnect()

    log.info("✅ WebSocketManager shutdown complete")
on_start()

Initialize the WebSocketManager.

Source code in toolboxv2/mods/WebSocketManager.py
435
436
437
def on_start(self):
    """Lifecycle hook: log that the WebSocketManager module is up."""
    self.logger.info("🚀 WebSocketManager started")
show_version()

Show current version.

Source code in toolboxv2/mods/WebSocketManager.py
453
454
455
def show_version(self):
    """Return the tool's version string."""
    current = self.version
    return current

WebSocketClient

Robust WebSocket client with automatic reconnection.

Source code in toolboxv2/mods/WebSocketManager.py
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
class WebSocketClient:
    """Robust WebSocket client with automatic reconnection.

    Holds one connection plus two background tasks (a frame listener and a
    keep-alive pinger). On failure, reconnection retries with exponential
    backoff up to ``max_reconnect_attempts``.
    """

    def __init__(self, client_id: str, logger: Optional[logging.Logger] = None):
        self.client_id = client_id
        self.logger = logger or logging.getLogger(f"WSClient.{client_id}")

        # Connection management
        self.ws: Optional[Any] = None
        self.server_url: Optional[str] = None
        self.state = ConnectionState.DISCONNECTED

        # Tasks and control
        self.should_reconnect = True
        self.reconnect_attempts = 0
        self.max_reconnect_attempts = 10
        self.connection_task: Optional[asyncio.Task] = None
        self.ping_task: Optional[asyncio.Task] = None

        # Message handling
        self.message_handlers: Dict[str, Callable] = {}
        # NOTE(review): message_queue is never read or written within this
        # class as shown — confirm it is used elsewhere before removing.
        self.message_queue = asyncio.Queue()

    async def connect(self, server_url: str, timeout: float = 30.0) -> bool:
        """Connect to WebSocket server.

        Returns True on success (or when already connected/connecting),
        False when the attempt fails within *timeout* seconds.
        """
        if self.state in [ConnectionState.CONNECTED, ConnectionState.CONNECTING]:
            return True

        self.server_url = server_url
        self.state = ConnectionState.CONNECTING
        self.should_reconnect = True

        try:
            self.logger.info(f"Connecting to {server_url}")
            self.ws = await asyncio.wait_for(ws_connect(server_url), timeout=timeout)

            self.state = ConnectionState.CONNECTED
            self.reconnect_attempts = 0

            # Start background tasks
            self.connection_task = asyncio.create_task(self._listen_loop())
            self.ping_task = asyncio.create_task(self._ping_loop())

            self.logger.info("✅ Connected successfully")
            return True

        except Exception as e:
            self.logger.error(f"❌ Connection failed: {e}")
            self.state = ConnectionState.DISCONNECTED
            return False

    async def disconnect(self) -> None:
        """Gracefully disconnect: stop reconnection, cancel tasks, close socket."""
        self.should_reconnect = False
        self.state = ConnectionState.CLOSED

        # Cancel tasks
        for task in [self.connection_task, self.ping_task]:
            if task and not task.done():
                task.cancel()

        # Close connection
        if self.ws:
            try:
                await self.ws.close()
            except Exception:
                pass
            self.ws = None

        self.logger.info("✅ Disconnected")

    def register_handler(self, event: str, handler: Callable[[WebSocketMessage], Awaitable[None]]) -> None:
        """Register a message handler for specific events (last registration wins)."""
        self.message_handlers[event] = handler
        self.logger.info(f"Registered handler for event: {event}")

    async def send_message(self, event: str, data: Dict[str, Any]) -> bool:
        """Send a message to the server; triggers a reconnect on send failure."""
        if self.state != ConnectionState.CONNECTED or not self.ws:
            self.logger.warning("Cannot send message: not connected")
            return False

        try:
            message = WebSocketMessage(event=event, data=data)
            await self.ws.send(message.to_json())
            return True
        except Exception as e:
            self.logger.error(f"Failed to send message: {e}")
            await self._trigger_reconnect()
            return False

    async def _listen_loop(self) -> None:
        """Main message listening loop; exits on close/error and may reconnect."""
        while self.should_reconnect and self.ws:
            try:
                # Short recv timeout so the should_reconnect flag is re-checked.
                message_raw = await asyncio.wait_for(self.ws.recv(), timeout=5.0)

                # Handle the message in background
                asyncio.create_task(self._handle_message(message_raw))

            except asyncio.TimeoutError:
                continue  # Normal timeout
            except ConnectionClosed:
                self.logger.warning("Connection closed by server")
                break
            except Exception as e:
                self.logger.error(f"Listen loop error: {e}")
                break

        if self.should_reconnect:
            await self._trigger_reconnect()

    async def _handle_message(self, message_raw: str) -> None:
        """Parse one raw frame and dispatch it to the registered handler."""
        try:
            message = WebSocketMessage.from_json(message_raw)

            if message.event in self.message_handlers:
                await self.message_handlers[message.event](message)
            else:
                self.logger.debug(f"No handler for event: {message.event}")

        except Exception as e:
            self.logger.error(f"Message handling error: {e}")

    async def _ping_loop(self) -> None:
        """Periodic ping to maintain connection; breaks (and reconnects) on failure."""
        while self.should_reconnect and self.state == ConnectionState.CONNECTED:
            try:
                await asyncio.sleep(20)  # Ping every 20 seconds

                # NOTE(review): relies on a ``closed`` attribute on the socket
                # object — confirm for the websocket library version in use.
                if self.ws and not self.ws.closed:
                    pong_waiter = await self.ws.ping()
                    await asyncio.wait_for(pong_waiter, timeout=10.0)
                    self.logger.debug("📡 Ping successful")
                else:
                    break

            except Exception as e:
                self.logger.error(f"Ping failed: {e}")
                break

        if self.should_reconnect:
            await self._trigger_reconnect()

    async def _trigger_reconnect(self) -> None:
        """Trigger reconnection with exponential backoff (capped at 60s per wait)."""
        # Guard: only one reconnect loop at a time.
        if self.state == ConnectionState.RECONNECTING:
            return

        self.state = ConnectionState.RECONNECTING
        self.logger.info("🔄 Starting reconnection...")

        while (self.should_reconnect and
               self.reconnect_attempts < self.max_reconnect_attempts):

            self.reconnect_attempts += 1
            delay = min(2 ** self.reconnect_attempts, 60)  # Max 60s delay

            self.logger.info(f"Reconnect attempt {self.reconnect_attempts} in {delay}s")
            await asyncio.sleep(delay)

            try:
                if await self.connect(self.server_url):
                    return
            except Exception as e:
                self.logger.error(f"Reconnect attempt failed: {e}")

        # Gave up: leave the client disconnected and stop retrying.
        self.logger.error("❌ Max reconnection attempts reached")
        self.should_reconnect = False
        self.state = ConnectionState.DISCONNECTED
connect(server_url, timeout=30.0) async

Connect to WebSocket server.

Source code in toolboxv2/mods/WebSocketManager.py
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
async def connect(self, server_url: str, timeout: float = 30.0) -> bool:
    """Open a WebSocket connection and spawn the listen/ping worker tasks.

    Returns True on success (or when already connected/connecting),
    False when the attempt fails within *timeout* seconds.
    """
    if self.state in (ConnectionState.CONNECTED, ConnectionState.CONNECTING):
        return True

    self.server_url = server_url
    self.state = ConnectionState.CONNECTING
    self.should_reconnect = True

    try:
        self.logger.info(f"Connecting to {server_url}")
        self.ws = await asyncio.wait_for(ws_connect(server_url), timeout=timeout)
    except Exception as e:
        self.logger.error(f"❌ Connection failed: {e}")
        self.state = ConnectionState.DISCONNECTED
        return False

    self.state = ConnectionState.CONNECTED
    self.reconnect_attempts = 0

    # Background workers: one reads frames, one keeps the link alive.
    self.connection_task = asyncio.create_task(self._listen_loop())
    self.ping_task = asyncio.create_task(self._ping_loop())

    self.logger.info("✅ Connected successfully")
    return True
disconnect() async

Gracefully disconnect.

Source code in toolboxv2/mods/WebSocketManager.py
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
async def disconnect(self) -> None:
    """Stop reconnection, cancel the worker tasks, and close the socket."""
    self.should_reconnect = False
    self.state = ConnectionState.CLOSED

    # Cancel whichever background tasks are still running.
    pending = [t for t in (self.connection_task, self.ping_task)
               if t and not t.done()]
    for task in pending:
        task.cancel()

    # Best-effort close of the underlying websocket.
    if self.ws:
        try:
            await self.ws.close()
        except Exception:
            pass
        self.ws = None

    self.logger.info("✅ Disconnected")
register_handler(event, handler)

Register a message handler for specific events.

Source code in toolboxv2/mods/WebSocketManager.py
225
226
227
228
def register_handler(self, event: str, handler: Callable[[WebSocketMessage], Awaitable[None]]) -> None:
    """Bind an async *handler* to *event*; re-registering replaces the old one."""
    self.message_handlers[event] = handler
    self.logger.info(f"Registered handler for event: {event}")
send_message(event, data) async

Send a message to the server.

Source code in toolboxv2/mods/WebSocketManager.py
230
231
232
233
234
235
236
237
238
239
240
241
242
243
async def send_message(self, event: str, data: Dict[str, Any]) -> bool:
    """Serialize and send one message; a failed send triggers reconnection."""
    ready = self.state == ConnectionState.CONNECTED and bool(self.ws)
    if not ready:
        self.logger.warning("Cannot send message: not connected")
        return False

    try:
        await self.ws.send(WebSocketMessage(event=event, data=data).to_json())
    except Exception as e:
        self.logger.error(f"Failed to send message: {e}")
        # Kick off the reconnect state machine before reporting failure.
        await self._trigger_reconnect()
        return False
    return True

WebSocketPool

Manages a pool of WebSocket connections with actions and message routing.

Source code in toolboxv2/mods/WebSocketManager.py
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
class WebSocketPool:
    """Manages a pool of WebSocket connections with actions and message routing.

    Connections are duck-typed objects exposing async ``send``/``close``.
    Handlers are registered either pool-wide (global) or per connection.
    """

    def __init__(self, pool_id: str):
        self.pool_id = pool_id
        # connection_id -> websocket object.
        self.connections: Dict[str, Any] = {}
        # connection_id -> {action_name: handler}; see register_action.
        self.actions: Dict[str, Dict[str, Callable]] = {}
        # action_name -> handler applied regardless of connection.
        self.global_actions: Dict[str, Callable] = {}
        self.metadata: Dict[str, Any] = {}
        self.logger = logging.getLogger(f"WSPool.{pool_id}")

    async def add_connection(self, connection_id: str, websocket: Any) -> None:
        """Add a WebSocket connection to the pool (replaces any same-id entry)."""
        self.connections[connection_id] = websocket
        self.logger.info(f"Added connection {connection_id} (total: {len(self.connections)})")

    async def remove_connection(self, connection_id: str) -> None:
        """Remove a WebSocket connection from the pool; no-op if unknown."""
        if connection_id in self.connections:
            del self.connections[connection_id]
            self.logger.info(f"Removed connection {connection_id} (remaining: {len(self.connections)})")

    def register_action(self, action_name: str, handler: Callable,
                        connection_ids: Optional[List[str]] = None) -> None:
        """Register an action handler for specific connections or globally.

        With ``connection_ids=None`` the handler applies pool-wide; otherwise
        it is stored per connection id in the nested ``actions`` mapping.
        """
        if connection_ids is None:
            self.global_actions[action_name] = handler
            self.logger.info(f"Registered global action: {action_name}")
        else:
            for conn_id in connection_ids:
                if conn_id not in self.actions:
                    self.actions[conn_id] = {}
                self.actions[conn_id][action_name] = handler
            self.logger.info(f"Registered action {action_name} for connections: {connection_ids}")

    async def handle_message(self, connection_id: str, message: str) -> None:
        """Route incoming messages to appropriate handlers.

        Global actions take precedence over connection-specific ones.
        All errors are logged, never raised to the caller.
        """
        try:
            ws_message = WebSocketMessage.from_json(message)
            action = ws_message.event

            # Try global actions first
            if action in self.global_actions:
                await self.global_actions[action](self.pool_id, connection_id, ws_message)
            # Then try connection-specific actions
            elif connection_id in self.actions and action in self.actions[connection_id]:
                await self.actions[connection_id][action](self.pool_id, connection_id, ws_message)
            else:
                self.logger.warning(f"No handler for action '{action}' from {connection_id}")

        except json.JSONDecodeError:
            self.logger.error(f"Invalid JSON from {connection_id}: {message[:100]}")
        except Exception as e:
            self.logger.error(f"Error handling message from {connection_id}: {e}")

    async def broadcast(self, message: str, exclude_connection: Optional[str] = None) -> int:
        """Broadcast message to all connections in the pool.

        Connections that fail to send are dropped from the pool.
        Returns the number of successful deliveries.
        """
        sent_count = 0
        # Iterate over a snapshot: failed sends mutate self.connections.
        for conn_id, websocket in list(self.connections.items()):
            if conn_id != exclude_connection:
                try:
                    await websocket.send(message)
                    sent_count += 1
                except Exception as e:
                    self.logger.error(f"Failed to send to {conn_id}: {e}")
                    await self.remove_connection(conn_id)
        return sent_count

    async def send_to_connection(self, connection_id: str, message: str) -> bool:
        """Send message to a specific connection; drop it from the pool on failure."""
        if connection_id in self.connections:
            try:
                await self.connections[connection_id].send(message)
                return True
            except Exception as e:
                self.logger.error(f"Failed to send to {connection_id}: {e}")
                await self.remove_connection(connection_id)
        return False

    def get_connection_ids(self) -> List[str]:
        """Get list of all connection IDs."""
        return list(self.connections.keys())

    def get_connection_count(self) -> int:
        """Get number of active connections."""
        return len(self.connections)

    async def close_all(self) -> None:
        """Best-effort close of every connection in the pool, then empty it."""
        for websocket in list(self.connections.values()):
            try:
                await websocket.close()
            except Exception:
                pass
        self.connections.clear()
add_connection(connection_id, websocket) async

Add a WebSocket connection to the pool.

Source code in toolboxv2/mods/WebSocketManager.py
68
69
70
71
async def add_connection(self, connection_id: str, websocket: Any) -> None:
    """Register *websocket* under *connection_id* (replaces any same-id entry)."""
    self.connections[connection_id] = websocket
    total = len(self.connections)
    self.logger.info(f"Added connection {connection_id} (total: {total})")
broadcast(message, exclude_connection=None) async

Broadcast message to all connections in the pool.

Source code in toolboxv2/mods/WebSocketManager.py
112
113
114
115
116
117
118
119
120
121
122
123
async def broadcast(self, message: str, exclude_connection: Optional[str] = None) -> int:
    """Send *message* to every pooled connection except *exclude_connection*.

    Connections that fail to send are removed from the pool.
    Returns the number of successful deliveries.
    """
    delivered = 0
    # Snapshot: failed sends mutate self.connections during the loop.
    for conn_id, ws in list(self.connections.items()):
        if conn_id == exclude_connection:
            continue
        try:
            await ws.send(message)
        except Exception as e:
            self.logger.error(f"Failed to send to {conn_id}: {e}")
            await self.remove_connection(conn_id)
        else:
            delivered += 1
    return delivered
close_all() async

Close all connections in the pool.

Source code in toolboxv2/mods/WebSocketManager.py
144
145
146
147
148
149
150
151
async def close_all(self) -> None:
    """Best-effort close of every connection, then empty the pool."""
    for ws in list(self.connections.values()):
        try:
            await ws.close()
        except Exception:
            # A connection that fails to close is still dropped below.
            pass
    self.connections.clear()
get_connection_count()

Get number of active connections.

Source code in toolboxv2/mods/WebSocketManager.py
140
141
142
def get_connection_count(self) -> int:
    """Number of connections currently registered in the pool."""
    return len(self.connections)
get_connection_ids()

Get list of all connection IDs.

Source code in toolboxv2/mods/WebSocketManager.py
136
137
138
def get_connection_ids(self) -> List[str]:
    """Snapshot of the registered connection IDs, in insertion order."""
    return [cid for cid in self.connections]
handle_message(connection_id, message) async

Route incoming messages to appropriate handlers.

Source code in toolboxv2/mods/WebSocketManager.py
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
async def handle_message(self, connection_id: str, message: str) -> None:
    """Parse an incoming frame and dispatch it to the matching action handler.

    Global handlers take precedence over per-connection handlers; unknown
    actions are logged and dropped. No exception ever propagates out.
    """
    try:
        parsed = WebSocketMessage.from_json(message)
        action = parsed.event

        if action in self.global_actions:
            # Global handlers win over per-connection ones.
            await self.global_actions[action](self.pool_id, connection_id, parsed)
        else:
            per_conn = self.actions.get(connection_id, {})
            if action in per_conn:
                await per_conn[action](self.pool_id, connection_id, parsed)
            else:
                self.logger.warning(f"No handler for action '{action}' from {connection_id}")

    except json.JSONDecodeError:
        self.logger.error(f"Invalid JSON from {connection_id}: {message[:100]}")
    except Exception as e:
        self.logger.error(f"Error handling message from {connection_id}: {e}")
register_action(action_name, handler, connection_ids=None)

Register an action handler for specific connections or globally.

Source code in toolboxv2/mods/WebSocketManager.py
79
80
81
82
83
84
85
86
87
88
89
90
def register_action(self, action_name: str, handler: Callable,
                    connection_ids: Optional[List[str]] = None) -> None:
    """Attach *handler* under *action_name*.

    Without a connection list the handler becomes global; otherwise it is
    registered individually for each listed connection id.
    """
    if connection_ids is None:
        self.global_actions[action_name] = handler
        self.logger.info(f"Registered global action: {action_name}")
        return
    for cid in connection_ids:
        self.actions.setdefault(cid, {})[action_name] = handler
    self.logger.info(f"Registered action {action_name} for connections: {connection_ids}")
remove_connection(connection_id) async

Remove a WebSocket connection from the pool.

Source code in toolboxv2/mods/WebSocketManager.py
73
74
75
76
77
async def remove_connection(self, connection_id: str) -> None:
    """Drop *connection_id* from the pool; silently ignore unknown ids."""
    try:
        del self.connections[connection_id]
    except KeyError:
        return
    self.logger.info(f"Removed connection {connection_id} (remaining: {len(self.connections)})")
send_to_connection(connection_id, message) async

Send message to a specific connection.

Source code in toolboxv2/mods/WebSocketManager.py
125
126
127
128
129
130
131
132
133
134
async def send_to_connection(self, connection_id: str, message: str) -> bool:
    """Deliver *message* to one connection.

    Returns True on success. On a send failure the connection is evicted;
    unknown ids and failures both return False.
    """
    if connection_id not in self.connections:
        return False
    try:
        await self.connections[connection_id].send(message)
        return True
    except Exception as e:
        self.logger.error(f"Failed to send to {connection_id}: {e}")
        await self.remove_connection(connection_id)
    return False

WebSocketServer

WebSocket server with pool management.

Source code in toolboxv2/mods/WebSocketManager.py
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
class WebSocketServer:
    """WebSocket server that routes incoming connections into named pools."""

    def __init__(self, host: str = "localhost", port: int = 8765):
        # Listener address plus the registry of pools, keyed by pool id.
        self.host = host
        self.port = port
        self.pools: Dict[str, WebSocketPool] = {}
        self.server = None  # populated by start()
        self.logger = logging.getLogger("WSServer")

    def create_pool(self, pool_id: str) -> WebSocketPool:
        """Return the pool for *pool_id*, creating it on first use."""
        if pool_id not in self.pools:
            self.pools[pool_id] = WebSocketPool(pool_id)
            self.logger.info(f"Created pool: {pool_id}")
        return self.pools[pool_id]

    def get_pool(self, pool_id: str) -> Optional[WebSocketPool]:
        """Look up a pool without creating it; None when unknown."""
        return self.pools.get(pool_id)

    async def handle_connection(self, websocket, path: str):
        """Attach an incoming socket to the pool named by its URL path."""
        conn_id = f"conn_{id(websocket)}"
        target_pool = path.strip('/') or 'default'

        pool = self.create_pool(target_pool)
        await pool.add_connection(conn_id, websocket)

        self.logger.info(f"New connection {conn_id} in pool {target_pool}")

        try:
            async for frame in websocket:
                await pool.handle_message(conn_id, frame)
        except ConnectionClosed:
            pass  # normal client disconnect
        except Exception as e:
            self.logger.error(f"Connection error: {e}")
        finally:
            # Always drop the registration, whatever ended the receive loop.
            await pool.remove_connection(conn_id)

    async def start(self, non_blocking: bool = False) -> None:
        """Start listening; with non_blocking=False this blocks until closed."""
        if non_blocking is None:
            # NOTE(review): None short-circuits startup entirely — presumably
            # a "don't start" sentinel; confirm against callers.
            return
        self.server = await ws_serve(self.handle_connection, self.host, self.port)
        self.logger.info(f"🚀 WebSocket server started on {self.host}:{self.port}")

        if non_blocking:
            return
        await self.server.wait_closed()

    async def stop(self) -> None:
        """Close the listener and every pool, then forget all pools."""
        if self.server:
            self.server.close()
            await self.server.wait_closed()

        for pool in self.pools.values():
            await pool.close_all()
        self.pools.clear()

        self.logger.info("✅ Server stopped")
create_pool(pool_id)

Create a new connection pool.

Source code in toolboxv2/mods/WebSocketManager.py
337
338
339
340
341
342
def create_pool(self, pool_id: str) -> WebSocketPool:
    """Return the pool registered under *pool_id*, creating it on first use."""
    already_known = pool_id in self.pools
    if not already_known:
        self.pools[pool_id] = WebSocketPool(pool_id)
        self.logger.info(f"Created pool: {pool_id}")
    return self.pools[pool_id]
get_pool(pool_id)

Get an existing pool.

Source code in toolboxv2/mods/WebSocketManager.py
344
345
346
def get_pool(self, pool_id: str) -> Optional[WebSocketPool]:
    """Return the pool registered under *pool_id*, or None when unknown."""
    try:
        return self.pools[pool_id]
    except KeyError:
        return None
handle_connection(websocket, path) async

Handle new WebSocket connections.

Source code in toolboxv2/mods/WebSocketManager.py
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
async def handle_connection(self, websocket, path: str):
    """Register a new websocket in the pool derived from its URL path.

    Frames are routed to the pool until the socket closes; the
    connection is always deregistered on exit.
    """
    conn_id = f"conn_{id(websocket)}"
    target_pool = path.strip('/') or 'default'

    pool = self.create_pool(target_pool)
    await pool.add_connection(conn_id, websocket)

    self.logger.info(f"New connection {conn_id} in pool {target_pool}")

    try:
        async for frame in websocket:
            await pool.handle_message(conn_id, frame)
    except ConnectionClosed:
        pass  # normal client disconnect
    except Exception as e:
        self.logger.error(f"Connection error: {e}")
    finally:
        # Always drop the registration, whatever ended the receive loop.
        await pool.remove_connection(conn_id)
start(non_blocking=False) async

Start the WebSocket server.

Source code in toolboxv2/mods/WebSocketManager.py
368
369
370
371
372
373
374
375
376
async def start(self, non_blocking: bool = False) -> None:
    """Start listening; with non_blocking=False this blocks until closed."""
    if non_blocking is None:
        # NOTE(review): None short-circuits startup entirely — presumably a
        # "don't start" sentinel; confirm against callers.
        return
    self.server = await ws_serve(self.handle_connection, self.host, self.port)
    self.logger.info(f"🚀 WebSocket server started on {self.host}:{self.port}")

    if non_blocking:
        return
    await self.server.wait_closed()
stop() async

Stop the server and close all connections.

Source code in toolboxv2/mods/WebSocketManager.py
378
379
380
381
382
383
384
385
386
387
388
389
async def stop(self) -> None:
    """Close the listener and every pool, then forget all pools."""
    listener = self.server
    if listener:
        listener.close()
        await listener.wait_closed()

    # Shut down every pool before dropping the registry.
    for pool in list(self.pools.values()):
        await pool.close_all()
    self.pools.clear()

    self.logger.info("✅ Server stopped")

WhatsAppTb

client

DocumentSystem
Source code in toolboxv2/mods/WhatsAppTb/client.py
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
class DocumentSystem:
    """Document store on top of BlobStorage: metadata, previews and search."""

    def __init__(self, storage: BlobStorage):
        self.storage = storage
        # File extensions recognised for each logical media category.
        self.media_types = {
            'document': ['pdf', 'doc', 'docx', 'txt'],
            'image': ['jpg', 'jpeg', 'png', 'gif'],
            'video': ['mp4', 'mov', 'avi']
        }

    def list_documents(self, filter_type: str | None = None) -> list[dict]:
        """List all documents with metadata.

        Args:
            filter_type: if given, only documents of this media type are kept.

        Returns:
            One dict per stored blob: id/name/type/size/modified/preview.
        """
        docs = []
        for blob_id in self.storage._get_all_blob_ids():
            with BlobFile(blob_id, 'r', self.storage) as f:
                metadata = f.read_json()
                if metadata:
                    docs.append({
                        'id': blob_id,
                        'name': metadata.get('filename', blob_id),
                        'type': metadata.get('type', 'document'),
                        'size': metadata.get('size', 0),
                        'modified': metadata.get('timestamp', ''),
                        'preview': metadata.get('preview', '')
                    })
        if filter_type:
            return [d for d in docs if d['type'] == filter_type]
        return docs

    def save_document(self, file_data: bytes, filename: str, file_type: str) -> str:
        """Save a document with metadata; returns the new blob id."""
        blob_id = self.storage._generate_blob_id()
        metadata = {
            'filename': filename,
            'type': file_type,
            'size': len(file_data),
            'timestamp': datetime.now().isoformat(),
            'preview': self._generate_preview(file_data, file_type)
        }

        # The JSON metadata header is written before the raw payload.
        with BlobFile(blob_id, 'w', self.storage) as f:
            f.write_json(metadata)
            f.write(file_data)
        return blob_id

    def delete_document(self, blob_id: str) -> bool:
        """Delete a document; returns False (and logs) on any storage error."""
        try:
            self.storage.delete_blob(blob_id)
            return True
        except Exception as e:
            logging.error(f"Delete failed: {str(e)}")
            return False

    def search_documents(self, query: str) -> list[dict]:
        """Search documents by case-insensitive filename or content match."""
        results = []
        for doc in self.list_documents():
            if query.lower() in doc['name'].lower() or self._search_in_content(doc['id'], query):
                results.append(doc)
        return results

    def _generate_preview(self, data: bytes, file_type: str) -> str:
        """Generate a short preview string based on the file's media type."""
        if file_type in self.media_types['image']:
            return f"Image preview: {data[:100].hex()}"
        elif file_type in self.media_types['video']:
            return "Video preview unavailable"
        # Anything else is treated as text: first 100 bytes, lossily decoded.
        return data[:100].decode('utf-8', errors='ignore')

    def _search_in_content(self, blob_id: str, query: str) -> bool:
        """Case-insensitive substring search inside one blob's raw content."""
        try:
            with BlobFile(blob_id, 'r', self.storage) as f:
                content = f.read().decode('utf-8', errors='ignore')
                return query.lower() in content.lower()
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt and
            # SystemExit; unreadable or missing blobs simply don't match.
            return False
delete_document(blob_id)

Delete a document

Source code in toolboxv2/mods/WhatsAppTb/client.py
112
113
114
115
116
117
118
119
def delete_document(self, blob_id: str) -> bool:
    """Remove *blob_id* from storage; returns False (and logs) on any failure."""
    try:
        self.storage.delete_blob(blob_id)
    except Exception as e:
        logging.error(f"Delete failed: {str(e)}")
        return False
    return True
list_documents(filter_type=None)

List all documents with metadata

Source code in toolboxv2/mods/WhatsAppTb/client.py
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
def list_documents(self, filter_type: str = None) -> list[dict]:
    """List all documents with metadata.

    When *filter_type* is given, only entries of that media type are kept.
    """
    docs = []
    for blob_id in self.storage._get_all_blob_ids():
        with BlobFile(blob_id, 'r', self.storage) as f:
            meta = f.read_json()
            if not meta:
                continue
            docs.append({
                'id': blob_id,
                'name': meta.get('filename', blob_id),
                'type': meta.get('type', 'document'),
                'size': meta.get('size', 0),
                'modified': meta.get('timestamp', ''),
                'preview': meta.get('preview', '')
            })
    if filter_type:
        docs = [d for d in docs if d['type'] == filter_type]
    return docs
save_document(file_data, filename, file_type)

Save a document with metadata

Source code in toolboxv2/mods/WhatsAppTb/client.py
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
def save_document(self, file_data: bytes, filename: str, file_type: str) -> str:
    """Persist *file_data* with a metadata header; returns the new blob id."""
    blob_id = self.storage._generate_blob_id()
    # Metadata describes the payload; the preview is derived from its type.
    metadata = {
        'filename': filename,
        'type': file_type,
        'size': len(file_data),
        'timestamp': datetime.now().isoformat(),
        'preview': self._generate_preview(file_data, file_type)
    }

    with BlobFile(blob_id, 'w', self.storage) as out:
        out.write_json(metadata)  # JSON header precedes the raw bytes
        out.write(file_data)
    return blob_id
search_documents(query)

Search documents by filename or content

Source code in toolboxv2/mods/WhatsAppTb/client.py
121
122
123
124
125
126
127
def search_documents(self, query: str) -> list[dict]:
    """Return documents whose filename or content matches *query* (case-insensitive)."""
    needle = query.lower()
    return [
        doc for doc in self.list_documents()
        if needle in doc['name'].lower() or self._search_in_content(doc['id'], query)
    ]
WhatsAppAssistant dataclass
Source code in toolboxv2/mods/WhatsAppTb/client.py
 146
 147
 148
 149
 150
 151
 152
 153
 154
 155
 156
 157
 158
 159
 160
 161
 162
 163
 164
 165
 166
 167
 168
 169
 170
 171
 172
 173
 174
 175
 176
 177
 178
 179
 180
 181
 182
 183
 184
 185
 186
 187
 188
 189
 190
 191
 192
 193
 194
 195
 196
 197
 198
 199
 200
 201
 202
 203
 204
 205
 206
 207
 208
 209
 210
 211
 212
 213
 214
 215
 216
 217
 218
 219
 220
 221
 222
 223
 224
 225
 226
 227
 228
 229
 230
 231
 232
 233
 234
 235
 236
 237
 238
 239
 240
 241
 242
 243
 244
 245
 246
 247
 248
 249
 250
 251
 252
 253
 254
 255
 256
 257
 258
 259
 260
 261
 262
 263
 264
 265
 266
 267
 268
 269
 270
 271
 272
 273
 274
 275
 276
 277
 278
 279
 280
 281
 282
 283
 284
 285
 286
 287
 288
 289
 290
 291
 292
 293
 294
 295
 296
 297
 298
 299
 300
 301
 302
 303
 304
 305
 306
 307
 308
 309
 310
 311
 312
 313
 314
 315
 316
 317
 318
 319
 320
 321
 322
 323
 324
 325
 326
 327
 328
 329
 330
 331
 332
 333
 334
 335
 336
 337
 338
 339
 340
 341
 342
 343
 344
 345
 346
 347
 348
 349
 350
 351
 352
 353
 354
 355
 356
 357
 358
 359
 360
 361
 362
 363
 364
 365
 366
 367
 368
 369
 370
 371
 372
 373
 374
 375
 376
 377
 378
 379
 380
 381
 382
 383
 384
 385
 386
 387
 388
 389
 390
 391
 392
 393
 394
 395
 396
 397
 398
 399
 400
 401
 402
 403
 404
 405
 406
 407
 408
 409
 410
 411
 412
 413
 414
 415
 416
 417
 418
 419
 420
 421
 422
 423
 424
 425
 426
 427
 428
 429
 430
 431
 432
 433
 434
 435
 436
 437
 438
 439
 440
 441
 442
 443
 444
 445
 446
 447
 448
 449
 450
 451
 452
 453
 454
 455
 456
 457
 458
 459
 460
 461
 462
 463
 464
 465
 466
 467
 468
 469
 470
 471
 472
 473
 474
 475
 476
 477
 478
 479
 480
 481
 482
 483
 484
 485
 486
 487
 488
 489
 490
 491
 492
 493
 494
 495
 496
 497
 498
 499
 500
 501
 502
 503
 504
 505
 506
 507
 508
 509
 510
 511
 512
 513
 514
 515
 516
 517
 518
 519
 520
 521
 522
 523
 524
 525
 526
 527
 528
 529
 530
 531
 532
 533
 534
 535
 536
 537
 538
 539
 540
 541
 542
 543
 544
 545
 546
 547
 548
 549
 550
 551
 552
 553
 554
 555
 556
 557
 558
 559
 560
 561
 562
 563
 564
 565
 566
 567
 568
 569
 570
 571
 572
 573
 574
 575
 576
 577
 578
 579
 580
 581
 582
 583
 584
 585
 586
 587
 588
 589
 590
 591
 592
 593
 594
 595
 596
 597
 598
 599
 600
 601
 602
 603
 604
 605
 606
 607
 608
 609
 610
 611
 612
 613
 614
 615
 616
 617
 618
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
@dataclass
class WhatsAppAssistant:
    """Stateful WhatsApp bot wiring ISAA agents to Gmail/Calendar.

    Holds the WhatsApp client, the agent toolchain, Google service handles
    and per-chat interaction state (pending multi-step workflows, button
    payloads, message history).
    """

    whc: WhClient                       # WhatsApp client wrapper (messenger + progress messengers)
    isaa: 'Tools'                       # ISAA tool belt driving the agent
    agent: Optional['Agent'] = None     # main conversational agent, attached externally
    credentials: Credentials | None = None  # Google OAuth credentials (set by auth flow / load_credentials)
    state: AssistantState = AssistantState.OFFLINE  # lifecycle flag; ONLINE after __post_init__

    # Service clients (populated by init_services() once credentials exist)
    gmail_service: Any = None
    calendar_service: Any = None

    start_time: Any = None              # datetime of construction, set in __post_init__

    blob_docs_system: Any = None        # DocumentSystem over BlobStorage, set in __post_init__
    duration_minutes: int = 20          # default event length; presumably used by calendar helpers — confirm
    credentials_path: str = "/root/Toolboxv2/credentials.json"  # Google OAuth client-secrets file
    # Progress messengers keyed by task category ('task' / 'email' / 'calendar')
    progress_messengers: dict[str, 'ProgressMessenger'] = field(default_factory=dict)
    buttons: dict[str, dict] = field(default_factory=dict)  # WhatsApp button payloads by menu id
    history: FileCache = field(default_factory=FileCache)   # persisted message history

    # Per-phone pending multi-step workflow state ({} means "no open interaction")
    pending_actions: dict[str, dict] = field(default_factory=dict)


    def __post_init__(self):
        """Finish construction: runtime state, STT pipeline, Google auth, menus."""

        self.start_time = datetime.now()
        # Dedup store: handle_message fills it with (message_id, timestamp) tuples.
        self.processed_messages = set()
        self.message_lock = threading.Lock()
        self.audio_processor = None
        self.blob_docs_system = DocumentSystem(BlobStorage())
        # Speech-to-text callable; device=1 presumably selects a GPU — confirm.
        self.stt = get_app().run_any(TBEF.AUDIO.STT_GENERATE,
                                     model="openai/whisper-small",
                                     row=False, device=1)

        # No open interaction for the primary chat yet.
        self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {}

        self.load_credentials()  # best-effort: stays disconnected from Google on failure
        self.setup_progress_messengers()
        self.setup_interaction_buttons()
        # Replace the dataclass default with a folder-backed cache.
        self.history = FileCache(folder=".data/WhatsAppAssistant")
        self.state = AssistantState.ONLINE

    async def generate_authorization_url(self, *a):
        """Build a Google OAuth consent URL and ask the user to log in.

        Marks the chat as awaiting an authorization key and returns a
        quick-reply payload containing the consent URL.
        """
        from google_auth_oauthlib.flow import Flow

        # Scopes needed for mail modification and calendar access.
        scopes = [
            'https://www.googleapis.com/auth/gmail.modify',
            'https://www.googleapis.com/auth/calendar'
        ]

        oauth_flow = Flow.from_client_secrets_file(
            self.credentials_path,
            scopes=scopes,
            redirect_uri='urn:ietf:wg:oauth:2.0:oob'  # out-of-band flow for non-web apps
        )

        auth_url, _state = oauth_flow.authorization_url(
            access_type='offline',  # offline access yields a refresh token
            prompt='consent',       # always re-prompt so the refresh token is issued
        )

        phone = self.whc.progress_messenger0.recipient_phone
        self.pending_actions[phone] = {'type': 'auth', 'step': 'awaiting_key'}

        return {
            'type': 'quick_reply',
            'text': f'Url to log in {auth_url}',
            'options': {'cancel': '❌ Cancel Upload'}
        }

    def complete_authorization(self, message: Message):
        """Exchange the user-supplied authorization code for credentials.

        The code is read from ``message.content``; on success the credentials
        are persisted and the Google service clients are initialised.
        """
        from google_auth_oauthlib.flow import Flow

        scopes = [
            'https://www.googleapis.com/auth/gmail.modify',
            'https://www.googleapis.com/auth/calendar'
        ]
        oauth_flow = Flow.from_client_secrets_file(
            self.credentials_path,
            scopes=scopes,
            redirect_uri='urn:ietf:wg:oauth:2.0:oob'
        )

        # Trade the one-time code for an access/refresh token pair.
        oauth_flow.fetch_token(code=message.content)
        self.credentials = oauth_flow.credentials

        self.save_credentials()  # persist for the next start-up
        self.init_services()     # gmail/calendar clients usable from here on
        return "Done"


    def save_credentials(self):
        """Persist the current OAuth credentials to ``token/google_token.json``.

        Uses ``os.makedirs(..., exist_ok=True)`` so the directory check and
        creation are one race-free call (the original exists()/makedirs()
        pair could raise if the directory appeared between the two calls).
        """
        os.makedirs('token', exist_ok=True)

        with open('token/google_token.json', 'w') as token_file:
            token_file.write(self.credentials.to_json())


    def load_credentials(self):
        """
        Load previously saved credentials if available.

        :return: True when credentials were loaded and services initialised,
                 False otherwise.
        """
        try:
            self.credentials = Credentials.from_authorized_user_file('token/google_token.json')
        except FileNotFoundError:
            return False
        except ValueError:
            # Token file exists but is malformed/incomplete: google-auth raises
            # ValueError here. Start unauthenticated instead of crashing
            # __post_init__ on a corrupt token.
            return False
        self.init_services()
        return True


    def init_services(self):
        """Create the Gmail and Calendar API clients from stored credentials."""
        from googleapiclient.discovery import build

        creds = self.credentials
        self.gmail_service = build('gmail', 'v1', credentials=creds)
        self.calendar_service = build('calendar', 'v3', credentials=creds)
        # Authentication finished: drop any pending interaction state.
        self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {}

    def setup_progress_messengers(self):
        """Map task categories onto the client's three progress messengers."""
        client = self.whc
        self.progress_messengers = dict(
            task=client.progress_messenger0,
            email=client.progress_messenger1,
            calendar=client.progress_messenger2,
        )

    def setup_interaction_buttons(self):
        """Build the WhatsApp button payloads for the main menu and sub-menus."""
        main_rows = [
            {'id': 'agent', 'title': 'Agent Controls', 'description': 'Manage your AI assistant'},
            {'id': 'email', 'title': 'Email Management', 'description': 'Handle your emails'},
            {'id': 'calendar', 'title': 'Calendar', 'description': 'Manage your schedule'},
            {'id': 'docs', 'title': 'Documents', 'description': 'Handle documents'},
            {'id': 'system', 'title': 'System', 'description': 'System controls and metrics'},
        ]
        menu = {
            'header': 'Digital Assistant',
            'body': 'Please select an option:',
            'footer': '-- + --',
            'action': {
                'button': 'Menu',
                'sections': [{'title': 'Main Functions', 'rows': main_rows}],
            },
        }
        self.buttons = {
            'menu': menu,
            'agent': self._create_agent_controls_buttons(),
            'email': self._create_email_controls_buttons(),
            'calendar': self._create_calendar_controls_buttons(),
            'docs': self._create_docs_controls_buttons(),
            'system': self._create_system_controls_buttons(),
        }

    @staticmethod
    def _create_agent_controls_buttons():
        """Button payload for the agent-control sub-menu."""
        basic = [
            {'id': 'agent-task', 'title': 'Agent Task', 'description': 'Run the agent'},
            {'id': 'start', 'title': 'Start Agent', 'description': 'Run taskstack in background'},
            {'id': 'stop', 'title': 'Stop Agent', 'description': 'Stop taskstack execution'},
        ]
        advanced = [
            {'id': 'system-task', 'title': 'System Task',
             'description': 'Run the Isaa Reasoning Agent system'},
            {'id': 'tasks', 'title': 'Task Stack', 'description': 'View and manage tasks'},
            {'id': 'memory', 'title': 'Clear Memory', 'description': 'Reset agent memory'},
        ]
        return {
            'header': 'Agent Controls',
            'body': 'Manage your AI assistant:',
            'action': {
                'button': 'Select',
                'sections': [
                    {'title': 'Basic Actions', 'rows': basic},
                    {'title': 'Advanced Actions', 'rows': advanced},
                ],
            },
        }

    @staticmethod
    def _create_email_controls_buttons():
        """Button payload for the email-management sub-menu."""
        basic = [
            {'id': 'check', 'title': 'Check Emails', 'description': 'View recent emails'},
            {'id': 'send', 'title': 'Send Email', 'description': 'Compose new email'},
            {'id': 'summary', 'title': 'Get Summary', 'description': 'Summarize emails'},
        ]
        advanced = [
            {'id': 'search', 'title': 'Search', 'description': 'Search emails'},
        ]
        return {
            'header': 'Email Management',
            'body': 'Handle your emails:',
            'action': {
                'button': 'Select',
                'sections': [
                    {'title': 'Basic Actions', 'rows': basic},
                    {'title': 'Advanced Actions', 'rows': advanced},
                ],
            },
        }

    @staticmethod
    def _create_calendar_controls_buttons():
        """Button payload for the calendar-management sub-menu."""
        basic = [
            {'id': 'today', 'title': "Today's Events", 'description': "View today's schedule"},
            {'id': 'add', 'title': 'Add Event', 'description': 'Create new event'},
            {'id': 'upcoming', 'title': 'Upcoming', 'description': 'View upcoming events'},
        ]
        advanced = [
            {'id': 'find_slot', 'title': 'Find Time Slot', 'description': 'Find available time'},
        ]
        return {
            'header': 'Calendar Management',
            'body': 'Manage your schedule:',
            'action': {
                'button': 'Select',
                'sections': [
                    {'title': 'Basic Actions', 'rows': basic},
                    {'title': 'Advanced Actions', 'rows': advanced},
                ],
            },
        }

    @staticmethod
    def _create_docs_controls_buttons():
        """Button payload for the document-management sub-menu."""
        basic = [
            {'id': 'upload', 'title': 'Upload', 'description': 'Add new document'},
            {'id': 'list', 'title': 'List Documents', 'description': 'View all documents'},
            {'id': 'search', 'title': 'Search', 'description': 'Search documents'},
        ]
        advanced = [
            {'id': 'delete', 'title': 'Delete', 'description': 'Remove document'},
        ]
        return {
            'header': 'Document Management',
            'body': 'Handle your documents:',
            'action': {
                'button': 'Select',
                'sections': [
                    {'title': 'Basic Actions', 'rows': basic},
                    {'title': 'Advanced Actions', 'rows': advanced},
                ],
            },
        }

    @staticmethod
    def _create_system_controls_buttons():
        """Button payload for the system-control sub-menu."""
        rows = [
            {'id': 'status', 'title': 'System Status', 'description': 'View current status'},
            {'id': 'restart', 'title': 'Restart', 'description': 'Restart system'},
            {'id': 'connect', 'title': 'Connect', 'description': 'Connect to Google Calendar and Email'},
        ]
        return {
            'header': 'System Controls',
            'body': 'System management:',
            'action': {
                'button': 'Select',
                'sections': [{'title': 'Basic Actions', 'rows': rows}],
            },
        }

    async def handle_message(self, message: 'Message'):
        """Main entry point for incoming WhatsApp messages.

        Deduplicates webhook redeliveries, marks the message read, then
        routes it by content type. Old dedup entries are pruned afterwards.
        """

        # --- Deduplication ------------------------------------------------
        with self.message_lock:
            # processed_messages holds (message_id, epoch_ts) tuples, so
            # membership must be tested on the id component. (The original
            # `message.id in set_of_tuples` check could never match, making
            # dedup a no-op.)
            if any(seen_id == message.id for seen_id, _ in self.processed_messages):
                return

            now = time.time()
            # Newest wall-clock time at which we accepted a message. (The
            # original popped an *arbitrary* set element as "last".)
            last_ts = max((ts for _, ts in self.processed_messages), default=now)

            msg_ts = float(
                message.data.get('entry', [{}])[0]
                .get('changes', [{}])[0]
                .get('value', {})
                .get('messages', [{}])[0]
                .get('timestamp', 0)
            )
            # Drop stale redeliveries: anything more than 2 minutes older than
            # the newest accepted message. Epoch seconds are used throughout;
            # the original stored time.perf_counter() values, which are not
            # comparable with WhatsApp's epoch timestamps.
            if msg_ts < last_ts - 120:
                return
            self.processed_messages.add((message.id, now))

        # Mark message as read
        message.mark_as_read()

        # Extract content and type
        content_type = message.type
        content = message.content

        print(f"message.content {content=} {content_type=} {message.data=}")

        try:
            if content_type == 'interactive':
                await self.handle_interactive(message)
            elif content_type == 'audio':
                await self.handle_audio_message(message)
            elif content_type in ['document', 'image', 'video']:
                response = await self.handle_media_message(message)
                self.save_reply(message, response)
            elif content_type == 'text':
                if content.lower() == "menu":
                    self.whc.messenger.send_button(
                        recipient_id=self.whc.progress_messenger0.recipient_phone,
                        button=self.buttons[content.lower()]
                    )
                else:
                    await self.helper_text(message)
            else:
                message.reply("Unsupported message type")
        finally:
            # Cleanup old messages (keep 1 hour history)
            with self.message_lock:
                self._clean_processed_messages()

    async def helper_text(self, message: 'Message', return_text=False):
        """Route a plain-text message: finish any pending workflow first,
        otherwise hand it to the agent.

        :param return_text: kept for interface compatibility with callers
            (e.g. handle_audio_message); not acted on here yet.
        """
        # Some webhook payloads arrive without extracted content — pull it
        # from the raw event. An empty string also counts as missing (the
        # original `and` condition skipped extraction for empty str content).
        if not isinstance(message.content, str) or not message.content:
            content = self.whc.messenger.get_message(message.data)
            print(f"contents {content=}, {message.content=}")
            message.content = content
        self.history.set(message.id, message.content)

        phone = self.whc.progress_messenger0.recipient_phone
        pending = self.pending_actions[phone]
        if pending:
            message.reply(f"Open Interaction : {json.dumps(pending, indent=2)}")
            if pending.get('type') == 'auth':
                res = self.complete_authorization(message)
                self.save_reply(message, res)
                # The message WAS the auth code; don't also feed it to the
                # calendar/email/agent handlers (the original fell through).
                return
            res = await self.handle_calendar_actions(message)
            if res:
                self.save_reply(message, res)
                return
            res2 = await self.handle_email_actions(message)
            if res2:
                self.save_reply(message, res2)
                return
            await self.handle_agent_actions(message)
            return
        await self.handle_agent_actions(message)

    async def handle_interactive(self, message: Message):
        """Dispatch interactive (list/button) replies to the right handler."""
        payload = self.whc.messenger.get_interactive_response(message.data)
        kind = payload.get("type")
        if kind == "list_reply":
            await self.handle_button_interaction(payload.get("list_reply"), message)
        elif kind == "button_reply":
            # Button replies are currently only logged.
            print(payload)

    async def handle_audio_message(self, message: 'Message'):
        """Transcribe an incoming voice note and route the text onwards.

        Shows a loading animation while downloading + transcribing, replies
        with the transcription, then treats the text like a normal message.
        """
        progress = self.progress_messengers['task']
        stop_flag = threading.Event()
        progress.message_id = message.id
        progress.start_loading_in_background(stop_flag)

        # Guarantee the loading animation stops even if download/STT raises
        # (the original leaked the background loader on any exception).
        try:
            content = self.whc.messenger.get_audio(message.data)
            audio_file_name = self.whc.messenger.download_media(
                media_url=self.whc.messenger.query_media_url(media_id=content.get('id')),
                mime_type='audio/opus',
                file_path=".data/temp",
            )
            print(f"audio_file_name {audio_file_name}")
            if audio_file_name is None:
                message.reply("Could not process audio file")
                return

            text = self.stt(audio_file_name)['text']
            if not text:
                message.reply("Could not process audio")
                return

            message.reply("Transcription :\n " + text)
            message.content = text
            await self.helper_text(message, return_text=True)
            # TODO: synthesize an audio reply (TTS) and send it back.
        finally:
            stop_flag.set()

    async def confirm(self, message: Message):
        """Execute the pending two-step action (event creation / email send)."""
        phone = self.whc.progress_messenger0.recipient_phone
        status = self.pending_actions[phone]
        action = status.get('type')

        if action == "create_event":
            # NOTE(review): 'confirm_envet' presumably matches the (misspelled)
            # key written by the event-creation workflow — verify before fixing.
            if status.get('step') != "confirm_envet":
                return "❌"
            event = self._create_calendar_event(status.get('event_data'))
            self.pending_actions[phone] = {}
            return f"✅ Event created!\n{event.get('htmlLink')}"

        if action == "compose_email":
            if status.get('step') != "confirm_email":
                return "❌"
            # Send the assembled draft via the Gmail API.
            result = self.gmail_service.users().messages().send(
                userId='me',
                body=self._build_email_draft(status['draft'])
            ).execute()
            self.pending_actions[phone] = {}
            return f"✅ Email sent! Message ID: {result['id']}"

        return "❌ Done"

    async def cancel(self, *a):
        """Abort whatever interaction is pending for the active chat."""
        phone = self.whc.progress_messenger0.recipient_phone
        self.pending_actions[phone] = {}
        return "✅ cancel Done"

    async def handle_button_interaction(self, content: dict, message: Message):
        """Handle button click interactions.

        :param content: the ``list_reply`` payload; must contain an ``id``.
        :param message: the originating WhatsApp message (used for replies).
        """
        button_id = content['id']

        # First check if it's a main menu button
        if button_id in self.buttons:
            self.whc.messenger.send_button(
                recipient_id=self.whc.progress_messenger0.recipient_phone,
                button=self.buttons[button_id]
            )
            return

        # Handle action buttons
        # NOTE(review): the docs menu also emits id 'search', which resolves to
        # self.email_search here; 'search_docs' is not produced by any menu in
        # this file, so search_documents looks unreachable — confirm intent.
        action_handlers = {
            # Agent controls
            'start': self.start_agent,
            'stop': self.stop_agent,
            'tasks': self.show_task_stack,
            'memory': self.clear_memory,
            'system-task': self.system_task,
            'agent-task': self.agent_task,

            # Email controls
            'check': self.check_emails,
            'send': self.start_email_compose,
            'summary': self.email_summary,
            'search': self.email_search,

            # Calendar controls
            'today': self.show_today_events,
            'add': self.start_event_create,
            'upcoming': self.show_upcoming_events,
            'find_slot': self.find_time_slot,

            # Document controls
            'upload': self.start_document_upload,
            'list': self.list_documents,
            'search_docs': self.search_documents,
            'delete': self.delete_document,

            # System controls
            'status': self.system_status,
            'restart': self.restart_system,
            'connect': self.generate_authorization_url,

            'cancel': self.cancel,
            'confirm': self.confirm,
        }
        if button_id in action_handlers:
            try:
                # Start progress indicator
                progress = self.progress_messengers['task']
                stop_flag = threading.Event()
                # message_id = progress.send_initial_message(mode="loading")
                progress.message_id = message.id
                progress.start_loading_in_background(stop_flag)

                # Execute handler

                result = await action_handlers[button_id](message)


                # Send result
                if isinstance(result, str):
                    self.save_reply(message, result)
                elif isinstance(result, dict):  # For structured responses
                    self.send_structured_response(result)

                stop_flag.set()
            finally:
                #except Exception as e:
                # Setting an already-set Event is a no-op, so this duplicate
                # set() after the success path above is harmless.
                stop_flag.set()
            #    message.reply(f"❌ Error processing {button_id}: {str(e)}")
        # NOTE(review): substring tests below — an id like 'xevent_1' would
        # also match; startswith() is presumably the intent. Confirm id format.
        elif 'event_' in button_id:
            res = await self.get_event_details(button_id.replace("event_", ''))
            if isinstance(res, str):
                self.save_reply(message, res)
                return
            # get_event_details may return a mix of text replies and
            # location payloads for send_location.
            for r in res:
                if isinstance(r, str):
                    self.save_reply(message, r)
                else:
                    self.whc.messenger.send_location(**r)

        elif 'email_' in button_id:
            res = await self.get_email_details(button_id.replace("email_", ''))
            self.save_reply(message, res)
        else:
            message.reply("⚠️ Unknown command")

    def send_structured_response(self, result: dict):
        """Send complex responses using appropriate WhatsApp features"""
        recipient = self.whc.progress_messenger0.recipient_phone
        kind = result['type']

        if kind == 'list':
            button = {
                'header': result.get('header', ''),
                'body': result.get('body', ''),
                'footer': result.get('footer', ''),
                'action': {'button': 'Action', 'sections': result['sections']},
            }
            self.whc.messenger.send_button(recipient_id=recipient, button=button)
        elif kind == 'quick_reply':
            # Quick replies are rendered as a single-section list; titles are
            # truncated to WhatsApp's 24-char row-title limit.
            rows = [{'id': key, 'title': label[:23]}
                    for key, label in result['options'].items()]
            button = {
                'header': "Quick reply",
                'body': result['text'],
                'footer': '',
                'action': {'button': 'Action',
                           'sections': [{'title': 'View', 'rows': rows}]},
            }
            self.whc.messenger.send_button(recipient_id=recipient, button=button)
        elif kind == 'media':
            caption = result.get('caption', '')
            if result['media_type'] == 'image':
                self.whc.messenger.send_image(
                    image=result['url'],
                    recipient_id=recipient,
                    caption=caption
                )
            elif result['media_type'] == 'document':
                self.whc.messenger.send_document(
                    document=result['url'],
                    recipient_id=recipient,
                    caption=caption
                )

    async def clear_memory(self, message):
        """Wipe the agent's queued tasks and reset its conversational context."""
        agent = self.agent
        agent.reset_context()
        agent.taskstack.tasks = []
        return "🧠 Memory cleared successfully"

    async def system_task(self, message):
        """Ask the user for a prompt to feed the ISAA reasoning system."""
        phone = self.whc.progress_messenger0.recipient_phone
        self.pending_actions[phone] = {'type': 'system', 'step': 'await_query'}
        return {
            'type': 'quick_reply',
            'text': "Now prompt the 🧠ISAA-System 📝",
            'options': {'cancel': '❌ Cancel Search'},
        }

    async def agent_task(self, message):
        """Ask the user for a prompt to run against the self-agent."""
        phone = self.whc.progress_messenger0.recipient_phone
        self.pending_actions[phone] = {'type': 'self-agent', 'step': 'await_query'}
        return {
            'type': 'quick_reply',
            'text': "Now prompt the self-agent 📝",
            'options': {'cancel': '❌ Cancel Search'},
        }

    async def check_emails(self, message, query=""):
        """List up to ten inbox emails as a tappable WhatsApp list.

        :param query: optional Gmail search-operator string (``q`` parameter).
        """
        if not self.gmail_service:
            return "⚠️ Gmail service not configured"

        try:
            listing = self.gmail_service.users().messages().list(
                userId='me',
                maxResults=10,
                labelIds=['INBOX'],
                q=query
            ).execute()

            emails = []
            for stub in listing.get('messages', [])[:10]:
                detail = self.gmail_service.users().messages().get(
                    userId='me',
                    id=stub['id'],
                    format='metadata'
                ).execute()
                headers = {h['name']: h['value'] for h in detail['payload']['headers']}
                emails.append({
                    'id': stub['id'],
                    'from': headers.get('From', 'Unknown'),
                    'subject': headers.get('Subject', 'No Subject'),
                    'date': headers.get('Date', 'Unknown'),
                    'snippet': detail.get('snippet', ''),
                    'unread': 'UNREAD' in detail.get('labelIds', []),
                })

            # WhatsApp list rows: title capped at 24 chars, description at 45.
            rows = [{
                'id': f"email_{mail['id']}",
                'title': f"{'📬' if mail['unread'] else '📭'} {mail['subject']}"[:23],
                'description': f"From: {mail['from']}\n{mail['snippet']}"[:45],
            } for mail in emails]

            return {
                'type': 'list',
                'header': '📨 Recent Emails',
                'body': 'Tap to view full email',
                'footer': 'Email Manager',
                'sections': [{'title': f"Inbox ({len(emails)} emails)", 'rows': rows}],
            }
        except Exception as e:
            return f"⚠️ Error fetching emails: {str(e)}"

    async def get_email_details(self, email_id):
        """Fetch one email in full and return an AI bullet-point summary."""
        if not self.gmail_service:
            return "⚠️ Gmail service not configured"

        try:
            detail = self.gmail_service.users().messages().get(
                userId='me',
                id=email_id,
                format='full'
            ).execute()
            headers = {h['name']: h['value'] for h in detail['payload']['headers']}

            # First text/plain part wins.
            body = ""
            for part in detail.get('payload', {}).get('parts', []):
                if part['mimeType'] == 'text/plain':
                    body = base64.urlsafe_b64decode(part['body']['data']).decode('utf-8')
                    break

            truncated = f"{body[:15000]}{'...' if len(body) > 15000 else ''}"
            formatted_text = (
                f"📧 *Email Details*\n\n"
                f"From: {headers.get('From', 'Unknown')}\n"
                f"Subject: {headers.get('Subject', 'No Subject')}\n"
                f"Date: {headers.get('Date', 'Unknown')}\n\n"
                f"{truncated}"
            )
            return self.agent.mini_task(
                formatted_text, "system", "Summarize the email in bullet points with key details"
            )
        except Exception as e:
            return f"⚠️ Error fetching email: {str(e)}"

    async def email_summary(self, message):
        """Summarize the three newest inbox emails with the agent."""
        try:
            stubs = self.gmail_service.users().messages().list(
                userId='me',
                maxResults=3,
                labelIds=['INBOX']
            ).execute().get('messages', [])

            contents = [
                self._parse_email_content(
                    self.gmail_service.users().messages().get(
                        userId='me', id=stub['id'], format='full'
                    ).execute()
                )
                for stub in stubs[:3]
            ]

            summary = self.agent.mini_task(
                "\n\n".join(contents), "system",
                "Summarize these emails in bullet points with key details:"
            )
            return f"📋 Email Summary:\n{summary}\n\n*Powered by AI*"
        except Exception as e:
            logging.error(f"Summary failed: {str(e)}")
            return f"❌ Could not generate summary: {str(e)}"

    async def email_search(self, message):
        """Start the email-search workflow (next message is the query)."""
        phone = self.whc.progress_messenger0.recipient_phone
        self.pending_actions[phone] = {'type': 'email_search', 'step': 'await_query'}
        return {
            'type': 'quick_reply',
            'text': "🔍 What would you like to search for?",
            'options': {'cancel': '❌ Cancel Search'},
        }

    async def start_email_compose(self, message):
        """Begin the multi-step email composition workflow (subject first)."""
        phone = self.whc.progress_messenger0.recipient_phone
        self.pending_actions[phone] = {
            'type': 'compose_email',
            'step': 'subject',
            'draft': {'attachments': []},
        }
        return {
            'type': 'quick_reply',
            'text': "📝 Let's compose an email\n\nSubject:",
            'options': {'cancel': '❌ Cancel Composition'},
        }

    async def handle_email_actions(self, message):
        """Handle multi-step email workflows.

        :return: the next workflow response (str or dict), or None when no
                 email workflow is pending for this chat.
        """
        user_state = self.pending_actions.get(self.whc.progress_messenger0.recipient_phone, {})

        if user_state.get('type') == 'compose_email':
            return await self._handle_email_composition(message, user_state)
        if user_state.get('type') == 'email_search':
            # The (German) mini_task prompt converts the user's free text into
            # a Gmail search-operator query string, which check_emails() then
            # passes to the Gmail API as the `q` parameter.
            return await self.check_emails(message, self.agent.mini_task("""Conventire Pezise zu einer googel str only query using : Gmail Suchoperatoren!

Basis-Operatoren:
- from: Absender
- to: Empfänger
- subject: Betreff
- label: Gmail Label
- has:attachment Anhänge
- newer_than:7d Zeitfilter
- before: Datum vor
- after: Datum nach

Erweiterte Operatoren:
- in:inbox
- in:sent
- in:spam
- cc: Kopie
- bcc: Blindkopie
- is:unread
- is:read
- larger:10M Größenfilter
- smaller:5M
- filename:pdf Dateityp

Profi-Tipps:
- Kombinierbar mit UND/ODER
- Anführungszeichen für exakte Suche
- Negation mit -
 beispeile : 'Ungelesene Mails letzte Woche': -> 'is:unread newer_than:7d'

""", "user",message.content))


        return None

    async def _handle_email_composition(self, message, state):
        """Advance the compose-email state machine by one step.

        :param message: incoming WhatsApp message driving the workflow.
        :param state: this chat's pending-action dict; mutated in place.
        :return: the next prompt (str or quick-reply dict); None for an
                 unknown step value.
        """
        if state['step'] == 'subject':
            state['draft']['subject'] = message.content
            state['step'] = 'body'
            return {
                'type': 'quick_reply',
                'text': "✍️ Email body:",
                'options': {'attach': '📎 Add Attachment', 'send': '📤 Send Now'}
            }

        elif state['step'] == 'body':
            if message.content == 'attach':
                state['step'] = 'attachment'
                return "📎 Please send the file you want to attach"

            state['draft']['body'] = message.content
            # Final confirmation is handled by confirm() via the 'confirm' button.
            state['step'] = 'confirm_email'
            return {
                'type': 'quick_reply',
                'text': f"📧 Ready to send?\n\nSubject: {state['draft']['subject']}\n\n{state['draft']['body']}",
                'options': {'confirm': '✅ Send', 'cancel': '❌ cancel'}
            }

        elif state['step'] == 'attachment':
            # Handle attachment upload
            file_type = message.type
            if file_type not in ['document', 'image']:
                return "❌ Unsupported file type"

            # NOTE(review): `media_url` here is actually a media *id*, yet
            # `media_url.type` is then read as its MIME type below — a plain
            # id has no `.type`, so this step likely raises AttributeError at
            # runtime. Confirm the media object's API before relying on it.
            media_url = getattr(message, file_type).id
            media_data = self.whc.messenger.download_media(media_url=self.whc.messenger.query_media_url(media_id=media_url), mime_type=media_url.type, file_path=".data/temp")
            state['draft']['attachments'].append(media_data)
            state['step'] = 'body'
            return "📎 Attachment added! Add more or send the email"


    def _parse_email_content(self, email_data):
        """Extract readable content from email payload"""
        parts = email_data.get('payload', {}).get('parts', [])
        body = ""
        for part in parts:
            if part['mimeType'] == 'text/plain':
                body += base64.urlsafe_b64decode(part['body']['data']).decode('utf-8')
        return f"Subject: {email_data.get('subject', '')}\nFrom: {email_data.get('from', '')}\n\n{body}"

    def _build_email_draft(self, draft):
        """Create MIME message from draft data"""
        message = MIMEMultipart()
        message['to'] = draft.get('to', '')
        message['subject'] = draft['subject']
        message.attach(MIMEText(draft['body']))

        for attachment in draft['attachments']:
            part = MIMEBase('application', 'octet-stream')
            part.set_payload(attachment)
            encoders.encode_base64(part)
            part.add_header('Content-Disposition', 'attachment')
            message.attach(part)

        return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}

    def _get_email_subject(self, msg):
        headers = msg.get('payload', {}).get('headers', [])
        return next((h['value'] for h in headers if h['name'] == 'Subject'), 'No Subject')

    def _get_email_sender(self, msg):
        headers = msg.get('payload', {}).get('headers', [])
        return next((h['value'] for h in headers if h['name'] == 'From'), 'Unknown Sender')

    def _get_email_snippet(self, msg):
        return msg.get('snippet', '')[:100] + '...'
    # Calendar Handlers

    # Calendar Functions
    def _format_event_time(self, event):
        """Format a calendar event's start/end for display.

        Timed events ('dateTime', contains 'T') render as
        'Mon 01 Jan 10:00 - 11:00'; all-day events ('date') render as
        '01 Jan 2024 (All Day)'.

        Fix: replaced the bare ``except:`` (which also swallowed
        KeyboardInterrupt/SystemExit) with the concrete parse failures.
        """
        start = event['start'].get('dateTime', event['start'].get('date'))
        end = event['end'].get('dateTime', event['end'].get('date'))

        try:
            start_dt = parser.parse(start)
            end_dt = parser.parse(end)
            if 'T' in start:
                return f"{start_dt.strftime('%a %d %b %H:%M')} - {end_dt.strftime('%H:%M')}"
            return f"{start_dt.strftime('%d %b %Y')} (All Day)"
        except (ValueError, TypeError, OverflowError):
            # Malformed/missing date strings must not break list rendering.
            return "Time not specified"

    async def get_event_details(self, event_id):
        """Fetch one calendar event and format it for WhatsApp display.

        Args:
            event_id: Google Calendar event id on the 'primary' calendar.

        Returns:
            A list whose first element is the formatted text; when the event
            carries a 'geo' field, a second dict element with location data
            (lat/long/name/address/recipient_id) is appended. On failure or
            missing service, a plain error string is returned instead.
        """
        if not self.calendar_service:
            return "⚠️ Calendar service not configured"

        try:
            event = self.calendar_service.events().get(
                calendarId='primary',
                eventId=event_id
            ).execute()

            # Description is capped at 1000 chars to keep the message short.
            response = [ (
                    f"📅 *Event Details*\n\n"
                    f"Title: {event.get('summary', 'No title')}\n"
                    f"Time: {self._format_event_time(event)}\n"
                    f"Location: {event.get('location', 'Not specified')}\n\n"
                    f"{event.get('description', 'No description')[:1000]}"
                )]

            if 'geo' in event:
                # Optional location payload consumed by the WhatsApp sender.
                response.append({
                    'lat': float(event['geo']['latitude']),
                    'long': float(event['geo']['longitude']),
                    'name': event.get('location', 'Event Location'),
                    'address': event.get('location', ''),
                    'recipient_id': self.whc.progress_messenger0.recipient_phone
                })
            return response
        except Exception as e:
            return f"⚠️ Error fetching event: {str(e)}"

    async def show_today_events(self, message):
        """Show today's calendar events as a WhatsApp list payload.

        Fix: the original called the misspelled ``message.replay`` when no
        calendar service was configured and then fell through to crash on
        ``None.events()``. It now returns early with the same notice the
        other calendar handlers use.
        """
        if not self.calendar_service:
            return "⚠️ Calendar service not configured"

        now = datetime.utcnow().isoformat() + 'Z'
        # End of "today" = upcoming midnight (local clock).
        end_of_day = (datetime.now() + timedelta(days=1)).replace(
            hour=0, minute=0, second=0).isoformat() + 'Z'

        events_result = self.calendar_service.events().list(
            calendarId='primary',
            timeMin=now,
            timeMax=end_of_day,
            singleEvents=True,
            orderBy='startTime'
        ).execute()

        events = events_result.get('items', [])
        return self._format_calendar_response(events, "Today's Events")

    # Updated Calendar List Handlers
    async def show_upcoming_events(self, message):
        """List the next 7 days of calendar events as an interactive list."""
        if not self.calendar_service:
            return "⚠️ Calendar service not configured"

        try:
            window_start = datetime.utcnow().isoformat() + 'Z'
            window_end = (datetime.now() + timedelta(days=7)).isoformat() + 'Z'

            listing = self.calendar_service.events().list(
                calendarId='primary',
                timeMin=window_start,
                timeMax=window_end,
                singleEvents=True,
                orderBy='startTime',
                maxResults=10
            ).execute()

            upcoming = listing.get('items', [])
            return self._format_calendar_response(upcoming, "Upcoming Events")
        except Exception as e:
            return f"⚠️ Error fetching events: {str(e)}"

    async def start_event_create(self, message):
        """Begin the interactive event-creation workflow for this user."""
        phone = self.whc.progress_messenger0.recipient_phone
        self.pending_actions[phone] = {
            'type': 'create_event',
            'step': 'title',
            'event_data': {}
        }
        return {
            'type': 'quick_reply',
            'text': "Let's create an event! What's the title?",
            'options': {'cancel': '❌ Cancel'}
        }

    async def find_time_slot(self, message):
        """Find and display the next 5 available time slots with dynamic durations.

        Queries the Google Calendar FreeBusy API over the next 24 hours and
        converts the busy intervals into free slots of ``self.duration_minutes``.

        Returns:
            A WhatsApp interactive-list payload of up to 5 slots, or an
            error string when the service is missing or the query fails.
        """
        if not self.calendar_service:
            return "⚠️ Calendar service not configured"

        try:
            # Define the time range for the search (next 24 hours)
            now = datetime.now(UTC)
            end_time = now + timedelta(days=1)

            # FreeBusy request over the primary calendar only.
            freebusy_request = {
                "timeMin": now.isoformat(),
                "timeMax": end_time.isoformat(),
                "items": [{"id": 'primary'}]
            }

            freebusy_response = self.calendar_service.freebusy().query(body=freebusy_request).execute()
            busy_slots = freebusy_response['calendars']['primary']['busy']

            # Compute candidate free slots between the busy intervals.
            available_slots = self._calculate_efficient_slots(
                busy_slots,
                self.duration_minutes
            )

            # Format the response for WhatsApp
            return {
                'type': 'list',
                'header': "⏰ Available Time Slots",
                'body': "Tap to select a time slot",
                'footer': "Time Slot Finder",
                'sections': [{
                    'title': "Next 5 Available Slots",
                    'rows': [{
                        'id': f"slot_{slot['start'].timestamp()}",
                        'title': f"🕒 {slot['start'].strftime('%H:%M')} - {slot['end'].strftime('%H:%M')}",
                        'description': f"Duration: {slot['duration']}"
                    } for slot in available_slots[:5]]
                }]
            }
        except Exception as e:
            return f"⚠️ Error finding time slots: {str(e)}"

    def _calculate_efficient_slots(self, busy_slots, duration_minutes):
        """Compute free slots of the given duration within the next 24 hours.

        Walks forward from now; when a candidate slot overlaps a busy
        interval the cursor advances by 15 minutes, otherwise the slot is
        recorded and the cursor jumps to its end.
        """
        free = []
        cursor = datetime.now(UTC)
        horizon = cursor + timedelta(days=1)
        retry_step = timedelta(minutes=15)
        slot_length = timedelta(minutes=duration_minutes)

        while cursor < horizon:
            candidate_end = cursor + slot_length
            if candidate_end > horizon:
                break

            # Overlap test (De Morgan of "ends before or starts after").
            collides = any(
                candidate_end > parser.parse(busy['start']) and
                cursor < parser.parse(busy['end'])
                for busy in busy_slots
            )

            if collides:
                cursor += retry_step
            else:
                free.append({
                    'start': cursor,
                    'end': candidate_end,
                    'duration': f"{duration_minutes} min"
                })
                cursor = candidate_end

        return free

    async def handle_calendar_actions(self, message):
        """Dispatch calendar-related pending actions for the current user."""
        phone = self.whc.progress_messenger0.recipient_phone
        state = self.pending_actions.get(phone, {})

        if state.get('type') != 'create_event':
            return None
        return await self._handle_event_creation(message, state)

    async def _handle_event_creation(self, message, state):
        """Advance the event-creation state machine by one step.

        Steps: 'title' -> 'start_time' -> 'end_time' -> 'description' ->
        confirmation. Mutates ``state``/``event_data`` in place and returns
        the next prompt, or ``None`` for an unknown step.
        """
        step = state['step']
        event_data = state['event_data']

        if step == 'title':
            event_data['summary'] = message.content
            state['step'] = 'start_time'
            return "📅 When should it start? (e.g., 'tomorrow 2pm' or '2024-03-20 14:30')"

        elif step == 'start_time':
            event_data['start'] = self._parse_time(message.content)
            state['step'] = 'end_time'
            return "⏰ When should it end? (e.g., '3pm' or '2024-03-20 15:30')"

        elif step == 'end_time':
            # End time is parsed relative to the chosen start time.
            event_data['end'] = self._parse_time(message.content, reference=event_data['start'])
            state['step'] = 'description'
            return "📝 Add a description (or type 'skip')"

        elif step == 'description':
            if message.content.lower() != 'skip':
                event_data['description'] = message.content
            # NOTE(review): 'confirm_envet' looks like a typo for
            # 'confirm_event', but the consumer of this step value is not in
            # this file — confirm both sides before renaming.
            state['step'] = 'confirm_envet'
            return self._create_confirmation_message(event_data)

    def _format_calendar_response(self, events, title):
        """Enhanced calendar formatting with interactive support"""
        if not events:
            return f"📅 No {title.lower()} found"

        return {
            'type': 'list',
            'header': title,
            'body': "Tap to view event details",
            "footer": "-- Calendar --",
            'sections': [{
                'title': f"{len(events)} Events",
                'rows': [{
                    'id': f"event_{event['id']}",
                    'title': f"📅 {event['summary']}"[:23],
                    'description': self._format_event_time(event)[:45]
                } for event in events[:5]]
            }]
        }

    def _parse_iso_to_readable(self, iso_str):
        """Convert ISO datetime to readable format"""
        dt = datetime.fromisoformat(iso_str.replace('Z', '+00:00'))
        return dt.strftime("%a %d %b %Y %H:%M")

    def _parse_time(self, time_str, reference=None):
        """Convert natural-language time expressions to a precise datetime.

        Supports (German/English) phrases such as:
        - 'heute' / 'today'
        - 'morgen' / 'tomorrow'
        - 'in einer woche'
        - '10 uhr', '10pm'
        - 'nächsten montag'

        Args:
            time_str: Free-text time expression.
            reference: Base datetime for relative phrases; defaults to now.

        Returns:
            The parsed datetime; on any failure, ``reference`` is returned
            as a best-effort fallback (errors are printed, not raised).
        """
        if reference is None:
            reference = datetime.now()

        try:
            # Imported lazily so the module works without dateparser installed
            # (an ImportError falls through to the except below).
            import dateparser

            # dateparser handles fuzzy, relative, localized expressions.
            parsed_time = dateparser.parse(
                time_str,
                settings={
                    'PREFER_DATES_FROM': 'future',
                    'RELATIVE_BASE': reference,
                    'TIMEZONE': 'Europe/Berlin'
                }
            )

            if parsed_time is None:
                # Fallback to dateutil when dateparser cannot parse.
                parsed_time = parser .parse(time_str, fuzzy=True, default=reference)

            return parsed_time

        except Exception as e:
            print(f"Zeitparsing-Fehler: {e}")
            return reference

    def _calculate_free_slots(self, start, end, busy_slots):
        """Calculate free time slots between busy periods"""
        # Implementation would calculate available windows
        return [{
            'start': "09:00",
            'end': "11:00",
            'duration': "2 hours"
        }]

    def _create_confirmation_message(self, event_data):
        """Create event confirmation message"""
        details = [
            f"📌 Title: {event_data['summary']}",
            f"🕒 Start: {self._parse_iso_to_readable(event_data['start'])}",
            f"⏰ End: {self._parse_iso_to_readable(event_data['end'])}",
            f"📝 Description: {event_data.get('description', 'None')}"
        ]
        return {
            'type': 'quick_reply',
            'text': "\n".join(details),
            'options': {'confirm': '✅ Confirm', 'cancel': '❌ Cancel'}
        }

    def _create_calendar_event(self, event_data):
        """Create event through Calendar API"""
        event = {
            'summary': event_data['summary'],
            'start': {'dateTime': event_data['start']},
            'end': {'dateTime': event_data['end']},
        }
        if 'description' in event_data:
            event['description'] = event_data['description']

        return self.calendar_service.events().insert(
            calendarId='primary',
            body=event
        ).execute()

    async def system_status(self, message):
        """Report connectivity of each subsystem plus process uptime.

        Fix: the original did ``o.microseconds = 0`` (timedelta attributes
        are read-only, raising AttributeError) and called
        ``timedelta.isoformat()``, which does not exist. Uptime is now
        truncated to whole seconds and rendered via ``str()``.
        """
        uptime = datetime.now() - self.start_time
        uptime = timedelta(seconds=int(uptime.total_seconds()))
        status = {
            "🤖 Agent": "Online" if self.agent else "Offline",
            "📧 Email": "Connected" if self.gmail_service else "Disconnected",
            "📅 Calendar": "Connected" if self.calendar_service else "Disconnected",
            "📄 Documents": "Connected" if self.blob_docs_system else "Disconnected",
            "⏳ Uptime": f"{str(uptime)}"
        }
        return "\n".join([f"{k}: {v}" for k, v in status.items()])

    async def restart_system(self, message):
        """Soft-restart: notify the user, clear memory, confirm.

        Fix: ``time.sleep`` blocked the event loop inside this coroutine;
        it now awaits ``asyncio.sleep`` instead.
        """
        import asyncio

        message.reply("🔄 System restart initiated...")
        await asyncio.sleep(1)
        await self.clear_memory(message)
        await asyncio.sleep(1)
        return "✅ System restarted"

    # Updated document handlers
    async def list_documents(self, message, filter_type=None):
        """List stored documents, optionally filtered by type.

        Fix: the original contained a full WhatsApp-list payload after two
        unconditional returns — unreachable dead code, now removed.

        Returns:
            'No docs found' when empty, otherwise the documents rendered
            via ``str()`` (same behavior as before).
        """
        docs = self.blob_docs_system.list_documents(filter_type)
        if not docs:
            return "No docs found"
        return str(docs)

    async def start_document_upload(self, message):
        """Put the user into the file-upload workflow and prompt for a file."""
        phone = self.whc.progress_messenger0.recipient_phone
        self.pending_actions[phone] = {'type': 'document', 'step': 'awaiting_file'}
        return {
            'type': 'quick_reply',
            'text': '📤 Send me the file you want to upload',
            'options': {'cancel': '❌ Cancel Upload'}
        }

    async def search_documents(self, message):
        """Put the user into the document-search workflow and ask for a query."""
        phone = self.whc.progress_messenger0.recipient_phone
        self.pending_actions[phone] = {'type': 'search', 'step': 'awaiting_query'}
        return {
            'type': 'quick_reply',
            'text': '🔍 What are you looking for?',
            'options': {'cancel': '❌ Cancel Search'}
        }

    async def handle_media_message(self, message: 'Message'):
        """Handle an uploaded document/image/video when a file is awaited.

        Downloads the media through the WhatsApp messenger and stores it in
        the blob document system, then clears the pending action.

        Fixes: early-return guard instead of a trailing fallthrough; the
        file handle is closed via ``with`` (the original leaked it through
        ``open(...).read()``); debug ``print`` calls removed.
        """
        user_state = self.pending_actions.get(self.whc.progress_messenger0.recipient_phone, {})

        if user_state.get('step') != 'awaiting_file':
            return "No pending uploads"

        file_type = message.type
        if file_type not in ('document', 'image', 'video'):
            return "Unsupported file type"

        try:
            # Resolve the messenger accessor for this media type.
            fetchers = {
                'video': self.whc.messenger.get_video,
                'image': self.whc.messenger.get_image,
                'document': self.whc.messenger.get_document,
            }
            content = fetchers[file_type](message.data)
            media_path = self.whc.messenger.download_media(
                media_url=self.whc.messenger.query_media_url(media_id=content.get('id')),
                mime_type=content.get('mime_type'),
                file_path='.data/temp'
            )

            # Save to blob storage under a unique, type-tagged name.
            filename = f"file_{file_type}_{datetime.now().isoformat()}_{content.get('sha256', '')}"
            with open(media_path, 'rb') as fh:
                blob_id = self.blob_docs_system.save_document(
                    fh.read(),
                    filename=filename,
                    file_type=file_type
                )

            self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {}
            return f"✅ File uploaded successfully!\nID: {blob_id}"

        except Exception as e:
            logging.error(f"Upload failed: {str(e)}")
            return f"❌ Failed to upload file Error : {str(e)}"

    async def delete_document(self, message):
        """Offer up to five stored documents for deletion via quick reply."""
        docs = self.blob_docs_system.list_documents()
        choice_map = {doc['id']: doc['name'] for doc in docs[:5]}
        return {
            'type': 'quick_reply',
            'text': 'Select document to delete:',
            'options': choice_map,
            'handler': self._confirm_delete
        }

    async def _confirm_delete(self, doc_id, message):
        """Confirm deletion workflow"""
        doc = next((d for d in self.blob_docs_system.list_documents() if d['id'] == doc_id), None)
        if not doc:
            return "Document not found"

        if self.blob_docs_system.delete_document(doc_id):
            return f"✅ {doc['name']} deleted successfully"
        return "❌ Failed to delete document"

    # Helper methods
    def _get_icon(self, file_type: str) -> str:
        icons = {
            'document': '📄',
            'image': '🖼️',
            'video': '🎥'
        }
        return icons.get(file_type, '📁')

    def _format_size(self, size: int) -> str:
        if size < 1024:
            return f"{size}B"
        elif size < 1024 ** 2:
            return f"{size / 1024:.1f}KB"
        elif size < 1024 ** 3:
            return f"{size / (1024 ** 2):.1f}MB"
        return f"{size / (1024 ** 3):.1f}GB"

    # Utility Methods

    def _clean_processed_messages(self):
        """Clean old messages from processed cache"""
        now = time.time()
        self.processed_messages = {
            msg_id for msg_id, timestamp in self.processed_messages
            if now - timestamp < 3600  # 1 hour retention
        }

    def send_email(self, to, subject, body):
        """Send a plain-text email through the Gmail API.

        Returns True on dispatch, False when the Gmail service is missing.
        """
        if not self.gmail_service:
            return False

        mime_msg = MIMEText(body)
        mime_msg['to'] = to
        mime_msg['subject'] = subject
        raw = base64.urlsafe_b64encode(mime_msg.as_bytes()).decode()

        self.gmail_service.users().messages().send(
            userId='me',
            body={'raw': raw}
        ).execute()
        return True

    async def start_agent(self, *a):
        """Start the agent in background mode; True iff an agent exists."""
        if not self.agent:
            return False
        self.agent.run_in_background()
        return True

    async def stop_agent(self, *b):
        """Stop the running agent; True iff an agent exists."""
        if not self.agent:
            return False
        self.agent.stop()
        return True

    async def show_task_stack(self, *a):
        """Render the agent's current task stack via a formatting mini-task."""
        if not (self.agent and self.agent.taskstack.tasks):
            return "No tasks in stack"

        listing = "\n".join(
            f"Task {task.id}: {task.description}" for task in self.agent.taskstack.tasks
        )
        return self.agent.mini_task(listing, "system", "Format to nice and clean whatsapp format")

    def run(self):
        """Start the WhatsApp assistant.

        Marks the assistant ONLINE and sends the welcome message; any send
        failure flips the state back to OFFLINE and re-raises.
        """
        try:
            self.state = AssistantState.ONLINE
            # Send welcome message

            mas = self.whc.messenger.create_message(
                content="Digital Assistant is online! Send /help for available commands.",to=self.whc.progress_messenger0.recipient_phone,
            ).send(sender=0)
            # WhatsApp API response carries the sent message id (if any).
            mas_id = mas.get("messages", [{}])[0].get("id")
            print(mas_id)

        except Exception as e:
            logging.error(f"Assistant error: {str(e)}")
            self.state = AssistantState.OFFLINE
            raise

    async def handle_agent_actions(self, message):
        """Run the agent on an incoming message in a background thread.

        Shows a loading indicator while the agent works, enriches the
        prompt with quoted-message context (when the message replies to a
        previous one), routes through isaa/self-agent depending on the
        pending action type, and finally answers via a 'Chatter' mini-task.
        Returns immediately; the reply is sent asynchronously.
        """
        user_state = self.pending_actions.get(self.whc.progress_messenger0.recipient_phone, {})
        def helper():

            stop_flag = threading.Event()
            try:
                # Reuse the incoming message as the anchor for the progress/
                # loading animation.
                progress = self.progress_messengers['task']
                # message_id = progress.send_initial_message(mode="loading")
                progress.message_id = message.id
                progress.start_loading_in_background(stop_flag)
                res = message.content
                print(message.data.get('entry', [{}])[0].get('changes', [{}])[0].get('value', {}).get('messages', [{}])[0].get(
                    'context'))
                # If this message quotes another, append who wrote the quoted
                # message and (when cached) its text from history.
                if context := message.data.get('entry', [{}])[0].get('changes', [{}])[0].get('value', {}).get('messages', [{}])[0].get(
                    'context'):
                    context_str = f"Context : source {'USER' if context.get('from') in self.whc.progress_messenger0.recipient_phone else 'AGENT'}"
                    cd = self.history.get(context.get('id'))
                    context_str += "\n" + (cd if cd is not None else "The ref Message is not in the history")
                    res += "\n" + context_str
                # Pending-action routing: 'system' goes through isaa,
                # 'self-agent' through the agent; both clear the state.
                if user_state.get('type') == 'system':
                    res = self.isaa.run(res)
                    self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {}
                elif user_state.get('type') == 'self-agent':
                    res = self.agent.run(res)
                    self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {}
                # Temporary chat persona for the final WhatsApp-formatted reply.
                self.agent.mode = LLMMode(
                    name="Chatter",
                    description="whatsapp Chat LLM",
                    system_msg="Response precise and short style using whatsapp syntax!",
                    post_msg=None
                )
                response = self.agent.mini_task(res, "user", persist=True)
                self.save_reply(message, response)
            except Exception as e:
                stop_flag.set()
                message.reply("❌ Error in agent "+str(e))
            finally:
                # Always restore the default mode and stop the loader.
                self.agent.mode = None
                stop_flag.set()
        threading.Thread(target=helper, daemon=True).start()

    def save_reply(self, message, content):
        """Reply to a message and record the sent content in history by id."""
        result = message.reply(content)
        sent_id = result.get("messages", [{}])[0].get("id")
        if sent_id is None:
            print(f"No ID to add to history: {result}")
        else:
            self.history.set(sent_id, content)
agent_task(message) async

Initiate the self-agent prompting workflow (note: the docstring in the quoted source below says "email search", but the handler sets the pending action to `'self-agent'`)

Source code in toolboxv2/mods/WhatsAppTb/client.py
757
758
759
760
761
762
763
764
765
766
767
async def agent_task(self, message):
    """Initiate email search workflow"""
    self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {
        'type': 'self-agent',
        'step': 'await_query'
    }
    return {
        'type': 'quick_reply',
        'text': "Now prompt the self-agent 📝",
        'options': {'cancel': '❌ Cancel Search'}
    }
check_emails(message, query='') async

Improved email checking with WhatsApp API formatting

Source code in toolboxv2/mods/WhatsAppTb/client.py
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
async def check_emails(self, message, query=""):
    """Improved email checking with WhatsApp API formatting"""
    if not self.gmail_service:
        return "⚠️ Gmail service not configured"

    try:
        results = self.gmail_service.users().messages().list(
            userId='me',
            maxResults=10,
            labelIds=['INBOX'],
            q=query
        ).execute()

        emails = []
        for msg in results.get('messages', [])[:10]:
            email_data = self.gmail_service.users().messages().get(
                userId='me',
                id=msg['id'],
                format='metadata'
            ).execute()

            headers = {h['name']: h['value'] for h in email_data['payload']['headers']}
            emails.append({
                'id': msg['id'],
                'from': headers.get('From', 'Unknown'),
                'subject': headers.get('Subject', 'No Subject'),
                'date': headers.get('Date', 'Unknown'),
                'snippet': email_data.get('snippet', ''),
                'unread': 'UNREAD' in email_data.get('labelIds', [])
            })

        return {
            'type': 'list',
            'header': '📨 Recent Emails',
            'body': 'Tap to view full email',
            'footer': 'Email Manager',
            'sections': [{
                'title': f"Inbox ({len(emails)} emails)",
                'rows': [{
                    'id': f"email_{email['id']}",
                    'title': f"{'📬' if email['unread'] else '📭'} {email['subject']}"[:23],
                    'description': f"From: {email['from']}\n{email['snippet']}"[:45]
                } for email in emails]
            }]
        }
    except Exception as e:
        return f"⚠️ Error fetching emails: {str(e)}"
complete_authorization(message)

Complete the authorization process using the authorization code

:param authorization_code: Authorization code received from Google

Source code in toolboxv2/mods/WhatsAppTb/client.py
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
def complete_authorization(self, message: Message):
    """
    Complete the authorization process using the authorization code

    :param authorization_code: Authorization code received from Google
    """
    from google_auth_oauthlib.flow import Flow
    authorization_code = message.content
    # Define the scopes required for Gmail and Calendar
    SCOPES = [
        'https://www.googleapis.com/auth/gmail.modify',
        'https://www.googleapis.com/auth/calendar'
    ]

    # Create a flow instance to manage the OAuth 2.0 authorization process
    flow = Flow.from_client_secrets_file(
        self.credentials_path,
        scopes=SCOPES,
        redirect_uri='urn:ietf:wg:oauth:2.0:oob'
    )

    # Exchange the authorization code for credentials
    flow.fetch_token(code=authorization_code)
    self.credentials = flow.credentials

    # Save the credentials for future use
    self.save_credentials()

    # Initialize services
    self.init_services()
    return "Done"
delete_document(message) async

Delete document workflow

Source code in toolboxv2/mods/WhatsAppTb/client.py
1426
1427
1428
1429
1430
1431
1432
1433
1434
async def delete_document(self, message):
    """Delete document workflow"""
    docs = self.blob_docs_system.list_documents()
    return {
        'type': 'quick_reply',
        'text': 'Select document to delete:',
        'options': {doc['id']: doc['name'] for doc in docs[:5]},
        'handler': self._confirm_delete
    }

Initiate email search workflow

Source code in toolboxv2/mods/WhatsAppTb/client.py
876
877
878
879
880
881
882
883
884
885
886
async def email_search(self, message):
    """Initiate email search workflow"""
    self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {
        'type': 'email_search',
        'step': 'await_query'
    }
    return {
        'type': 'quick_reply',
        'text': "🔍 What would you like to search for?",
        'options': {'cancel': '❌ Cancel Search'}
    }
email_summary(message) async

Generate AI-powered email summaries

Source code in toolboxv2/mods/WhatsAppTb/client.py
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
async def email_summary(self, message):
    """Generate AI-powered email summaries"""
    try:
        messages = self.gmail_service.users().messages().list(
            userId='me',
            maxResults=3,
            labelIds=['INBOX']
        ).execute().get('messages', [])

        email_contents = []
        for msg in messages[:3]:
            email_data = self.gmail_service.users().messages().get(
                userId='me',
                id=msg['id'],
                format='full'
            ).execute()
            email_contents.append(self._parse_email_content(email_data))

        summary = self.agent.mini_task(
            "\n\n".join(email_contents) , "system", "Summarize these emails in bullet points with key details:"
        )

        return f"📋 Email Summary:\n{summary}\n\n*Powered by AI*"
    except Exception as e:
        logging.error(f"Summary failed: {str(e)}")
        return f"❌ Could not generate summary: {str(e)}"
find_time_slot(message) async

Find and display the next 5 available time slots with dynamic durations

Source code in toolboxv2/mods/WhatsAppTb/client.py
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
async def find_time_slot(self, message):
    """Find and display the next 5 available time slots with dynamic durations"""
    if not self.calendar_service:
        return "⚠️ Calendar service not configured"

    try:
        # Define the time range for the search (next 24 hours)
        now = datetime.now(UTC)
        end_time = now + timedelta(days=1)

        # FreeBusy Request
        freebusy_request = {
            "timeMin": now.isoformat(),
            "timeMax": end_time.isoformat(),
            "items": [{"id": 'primary'}]
        }

        freebusy_response = self.calendar_service.freebusy().query(body=freebusy_request).execute()
        busy_slots = freebusy_response['calendars']['primary']['busy']

        # Slot-Berechnung
        available_slots = self._calculate_efficient_slots(
            busy_slots,
            self.duration_minutes
        )

        # Format the response for WhatsApp
        return {
            'type': 'list',
            'header': "⏰ Available Time Slots",
            'body': "Tap to select a time slot",
            'footer': "Time Slot Finder",
            'sections': [{
                'title': "Next 5 Available Slots",
                'rows': [{
                    'id': f"slot_{slot['start'].timestamp()}",
                    'title': f"🕒 {slot['start'].strftime('%H:%M')} - {slot['end'].strftime('%H:%M')}",
                    'description': f"Duration: {slot['duration']}"
                } for slot in available_slots[:5]]
            }]
        }
    except Exception as e:
        return f"⚠️ Error finding time slots: {str(e)}"
generate_authorization_url(*a) async

Generate an authorization URL for user consent

:return: Authorization URL for the user to click and authorize access

Source code in toolboxv2/mods/WhatsAppTb/client.py
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
async def generate_authorization_url(self, *a):
    """
    Generate an authorization URL for user consent

    :return: Authorization URL for the user to click and authorize access
    """
    from google_auth_oauthlib.flow import Flow

    # Scopes: mailbox read/modify plus full calendar access.
    scopes = [
        'https://www.googleapis.com/auth/gmail.modify',
        'https://www.googleapis.com/auth/calendar'
    ]

    # OOB redirect: user copies the authorization code back by hand
    # (desktop-application flow).
    oauth_flow = Flow.from_client_secrets_file(
        self.credentials_path,
        scopes=scopes,
        redirect_uri='urn:ietf:wg:oauth:2.0:oob'
    )

    # offline -> refresh token issued; consent -> approval screen always shown.
    auth_url, _ = oauth_flow.authorization_url(
        access_type='offline',
        prompt='consent'
    )

    self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {
        'type': 'auth',
        'step': 'awaiting_key',
    }
    return {
        'type': 'quick_reply',
        'text': f'Url to log in {auth_url}',
        'options': {'cancel': '❌ Cancel Upload'}
    }
get_email_details(email_id) async

Retrieve and format full email details

Source code in toolboxv2/mods/WhatsAppTb/client.py
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
async def get_email_details(self, email_id):
    """Fetch a full email by id and return an AI bullet-point summary of it.

    Extracts headers and the first text/plain part (body truncated to 15000
    characters) before handing the formatted text to the agent.
    """
    if not self.gmail_service:
        return "⚠️ Gmail service not configured"

    try:
        raw = self.gmail_service.users().messages().get(
            userId='me',
            id=email_id,
            format='full',
        ).execute()

        header_map = {h['name']: h['value'] for h in raw['payload']['headers']}

        body_text = ""
        for part in raw.get('payload', {}).get('parts', []):
            if part['mimeType'] == 'text/plain':
                body_text = base64.urlsafe_b64decode(part['body']['data']).decode('utf-8')
                break

        truncated = body_text[:15000] + ('...' if len(body_text) > 15000 else '')
        formatted_text = (
            f"📧 *Email Details*\n\n"
            f"From: {header_map.get('From', 'Unknown')}\n"
            f"Subject: {header_map.get('Subject', 'No Subject')}\n"
            f"Date: {header_map.get('Date', 'Unknown')}\n\n"
            f"{truncated}"
        )
        return self.agent.mini_task(
            formatted_text, "system", "Summarize the email in bullet points with key details"
        )
    except Exception as e:
        return f"⚠️ Error fetching email: {str(e)}"
get_event_details(event_id) async

Retrieve and format calendar event details with location support

Source code in toolboxv2/mods/WhatsAppTb/client.py
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
async def get_event_details(self, event_id):
    """Fetch one calendar event and format it for WhatsApp.

    Returns a list whose first element is the formatted text; when the event
    carries geo coordinates a location payload dict is appended. On failure
    a plain error string is returned instead.
    """
    if not self.calendar_service:
        return "⚠️ Calendar service not configured"

    try:
        event = self.calendar_service.events().get(
            calendarId='primary',
            eventId=event_id,
        ).execute()

        summary_text = (
            f"📅 *Event Details*\n\n"
            f"Title: {event.get('summary', 'No title')}\n"
            f"Time: {self._format_event_time(event)}\n"
            f"Location: {event.get('location', 'Not specified')}\n\n"
            f"{event.get('description', 'No description')[:1000]}"
        )
        parts = [summary_text]

        if 'geo' in event:
            # Location payload consumable by messenger.send_location(**payload).
            parts.append({
                'lat': float(event['geo']['latitude']),
                'long': float(event['geo']['longitude']),
                'name': event.get('location', 'Event Location'),
                'address': event.get('location', ''),
                'recipient_id': self.whc.progress_messenger0.recipient_phone
            })
        return parts
    except Exception as e:
        return f"⚠️ Error fetching event: {str(e)}"
handle_audio_message(message) async

Process audio messages with STT and TTS

Source code in toolboxv2/mods/WhatsAppTb/client.py
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
async def handle_audio_message(self, message: 'Message'):
    """Process an audio message: download, transcribe (STT), hand to the agent.

    Starts the task progress spinner, downloads the voice note, runs STT,
    echoes the transcription back to the user and forwards the text to
    helper_text. The spinner is always stopped via try/finally — the original
    leaked it (kept spinning) when download/STT raised.
    """
    progress = self.progress_messengers['task']
    stop_flag = threading.Event()
    # message_id = progress.send_initial_message(mode="loading")
    progress.message_id = message.id
    progress.start_loading_in_background(stop_flag)

    try:
        content = self.whc.messenger.get_audio(message.data)
        audio_file_name = self.whc.messenger.download_media(
            media_url=self.whc.messenger.query_media_url(media_id=content.get('id')),
            mime_type='audio/opus',
            file_path=".data/temp",
        )
        print(f"audio_file_name {audio_file_name}")
        if audio_file_name is None:
            message.reply("Could not process audio file")
            return

        text = self.stt(audio_file_name)['text']
        if not text:
            message.reply("Could not process audio")
            return

        message.reply("Transcription :\n " + text)
        message.content = text
        # Result is delivered by helper_text itself; nothing to do with it here
        # (the original had a dead `if agent_res is not None: pass`).
        await self.helper_text(message, return_text=True)
    finally:
        # Always stop the loading indicator, even on errors/early returns.
        stop_flag.set()
handle_button_interaction(content, message) async

Handle button click interactions

Source code in toolboxv2/mods/WhatsAppTb/client.py
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
async def handle_button_interaction(self, content: dict, message: Message):
    """Dispatch a tapped button / list-row selection to its handler.

    Resolution order: (1) main-menu buttons (re-send the submenu),
    (2) named action handlers, (3) dynamic `event_<id>` / `email_<id>`
    detail rows, (4) fallback "unknown command" reply.
    """
    button_id = content['id']

    # First check if it's a main menu button
    if button_id in self.buttons:
        self.whc.messenger.send_button(
            recipient_id=self.whc.progress_messenger0.recipient_phone,
            button=self.buttons[button_id]
        )
        return

    # Handle action buttons — button id -> async handler(message).
    action_handlers = {
        # Agent controls
        'start': self.start_agent,
        'stop': self.stop_agent,
        'tasks': self.show_task_stack,
        'memory': self.clear_memory,
        'system-task': self.system_task,
        'agent-task': self.agent_task,

        # Email controls
        'check': self.check_emails,
        'send': self.start_email_compose,
        'summary': self.email_summary,
        'search': self.email_search,

        # Calendar controls
        'today': self.show_today_events,
        'add': self.start_event_create,
        'upcoming': self.show_upcoming_events,
        'find_slot': self.find_time_slot,

        # Document controls
        'upload': self.start_document_upload,
        'list': self.list_documents,
        'search_docs': self.search_documents,
        'delete': self.delete_document,

        # System controls
        'status': self.system_status,
        'restart': self.restart_system,
        'connect': self.generate_authorization_url,

        'cancel': self.cancel,
        'confirm': self.confirm,
    }
    if button_id in action_handlers:
        # NOTE(review): exceptions from handlers currently propagate (the
        # except clause is commented out); only the spinner is stopped.
        try:
            # Start progress indicator
            progress = self.progress_messengers['task']
            stop_flag = threading.Event()
            # message_id = progress.send_initial_message(mode="loading")
            progress.message_id = message.id
            progress.start_loading_in_background(stop_flag)

            # Execute handler

            result = await action_handlers[button_id](message)


            # Send result: plain text is replied directly, dicts are rendered
            # via send_structured_response (lists/quick replies/media).
            if isinstance(result, str):
                self.save_reply(message, result)
            elif isinstance(result, dict):  # For structured responses
                self.send_structured_response(result)

            stop_flag.set()
        finally:
            #except Exception as e:
            stop_flag.set()
        #    message.reply(f"❌ Error processing {button_id}: {str(e)}")
    elif 'event_' in button_id:
        # Calendar detail row: may return [text] or [text, location_payload].
        # NOTE(review): substring match, not startswith — ids merely containing
        # 'event_' also land here; confirm intended.
        res = await self.get_event_details(button_id.replace("event_", ''))
        if isinstance(res, str):
            self.save_reply(message, res)
            return
        for r in res:
            if isinstance(r, str):
                self.save_reply(message, r)
            else:
                self.whc.messenger.send_location(**r)

    elif 'email_' in button_id:
        # Email detail row: always a text summary.
        res = await self.get_email_details(button_id.replace("email_", ''))
        self.save_reply(message, res)
    else:
        message.reply("⚠️ Unknown command")
handle_calendar_actions(message) async

Handle calendar-related pending actions

Source code in toolboxv2/mods/WhatsAppTb/client.py
1193
1194
1195
1196
1197
1198
1199
1200
async def handle_calendar_actions(self, message):
    """Continue a pending calendar workflow for the current user, if any.

    Returns the workflow's response, or None when no calendar workflow
    is in progress.
    """
    state = self.pending_actions.get(self.whc.progress_messenger0.recipient_phone, {})
    if state.get('type') != 'create_event':
        return None
    return await self._handle_event_creation(message, state)
handle_email_actions(message) async

Handle multi-step email workflows

Source code in toolboxv2/mods/WhatsAppTb/client.py
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
    async def handle_email_actions(self, message):
        """Continue a pending multi-step email workflow for the current user.

        Supports two workflows keyed by the pending action 'type':
        'compose_email' (delegated to _handle_email_composition) and
        'email_search' (the free-text query is first converted into a Gmail
        search-operator string by the agent, then passed to check_emails).
        Returns None when no email workflow is pending.
        """
        user_state = self.pending_actions.get(self.whc.progress_messenger0.recipient_phone, {})

        if user_state.get('type') == 'compose_email':
            return await self._handle_email_composition(message, user_state)
        if user_state.get('type') == 'email_search':
            # The system prompt (German) instructs the agent to translate the
            # user's request into a raw Gmail query string (e.g. 'is:unread
            # newer_than:7d') before searching.
            return await self.check_emails(message, self.agent.mini_task("""Conventire Pezise zu einer googel str only query using : Gmail Suchoperatoren!

Basis-Operatoren:
- from: Absender
- to: Empfänger
- subject: Betreff
- label: Gmail Label
- has:attachment Anhänge
- newer_than:7d Zeitfilter
- before: Datum vor
- after: Datum nach

Erweiterte Operatoren:
- in:inbox
- in:sent
- in:spam
- cc: Kopie
- bcc: Blindkopie
- is:unread
- is:read
- larger:10M Größenfilter
- smaller:5M
- filename:pdf Dateityp

Profi-Tipps:
- Kombinierbar mit UND/ODER
- Anführungszeichen für exakte Suche
- Negation mit -
 beispeile : 'Ungelesene Mails letzte Woche': -> 'is:unread newer_than:7d'

""", "user",message.content))


        return None
handle_interactive(message) async

Handle all interactive messages

Source code in toolboxv2/mods/WhatsAppTb/client.py
533
534
535
536
537
538
539
async def handle_interactive(self, message: Message):
    """Route an interactive reply (list or button) to the matching handler."""
    content = self.whc.messenger.get_interactive_response(message.data)
    reply_kind = content.get("type")
    if reply_kind == "list_reply":
        await self.handle_button_interaction(content.get("list_reply"), message)
    elif reply_kind == "button_reply":
        print(content)
handle_media_message(message) async

Handle document/image/video uploads

Source code in toolboxv2/mods/WhatsAppTb/client.py
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
async def handle_media_message(self, message: 'Message'):
    """Handle a document/image/video upload during the upload workflow.

    Only acts when the user's pending action step is 'awaiting_file':
    downloads the media, stores it in the blob document system, clears the
    pending action and returns a status string. Otherwise reports that no
    upload is pending.
    """
    user_state = self.pending_actions.get(self.whc.progress_messenger0.recipient_phone, {})

    if user_state.get('step') != 'awaiting_file':
        return "No pending uploads"

    file_type = message.type
    if file_type not in ('document', 'image', 'video'):
        return "Unsupported file type"

    try:
        # Pick the matching metadata extractor for the media kind.
        if file_type == 'video':
            content = self.whc.messenger.get_video(message.data)
        elif file_type == 'image':
            content = self.whc.messenger.get_image(message.data)
        else:  # document
            content = self.whc.messenger.get_document(message.data)
        print("Media content:", content)

        media_data = self.whc.messenger.download_media(
            media_url=self.whc.messenger.query_media_url(media_id=content.get('id')),
            mime_type=content.get('mime_type'),
            file_path='.data/temp',
        )
        print("Media media_data:", media_data)

        # Save to blob storage under a timestamped, content-hashed name.
        filename = f"file_{file_type}_{datetime.now().isoformat()}_{content.get('sha256', '')}"
        # Close the downloaded file deterministically (original leaked the handle).
        with open(media_data, 'rb') as fh:
            payload = fh.read()
        blob_id = self.blob_docs_system.save_document(
            payload,
            filename=filename,
            file_type=file_type
        )

        self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {}
        return f"✅ File uploaded successfully!\nID: {blob_id}"

    except Exception as e:
        logging.error(f"Upload failed: {str(e)}")
        return f"❌ Failed to upload file Error : {str(e)}"
handle_message(message) async

Main message handler for incoming WhatsApp messages

Source code in toolboxv2/mods/WhatsAppTb/client.py
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
async def handle_message(self, message: 'Message'):
    """Main entry point for every incoming WhatsApp message.

    Performs deduplication/staleness filtering under the message lock,
    marks the message read, then routes it by content type (interactive,
    audio, media, text) to the specialised handlers. Old dedup entries are
    pruned on the way out.
    """

    # Deduplication check
    with self.message_lock:
        # NOTE(review): processed_messages stores (id, timestamp) tuples, so
        # this bare-id membership test can never match — confirm intended.
        if message.id in self.processed_messages:
            return
        last_ts = time.time()
        print(last_ts)
        # Peek at one stored entry to recover a previous timestamp
        # (pop + re-add leaves the set unchanged).
        if len(self.processed_messages) > 0:
            m_id, last_ts = self.processed_messages.pop()
            self.processed_messages.add((m_id, last_ts))

        # Drop messages whose webhook timestamp is more than 120s older than
        # the reference time (stale redeliveries).
        # NOTE(review): last_ts may come from time.perf_counter() (below),
        # which is not comparable to the epoch-based webhook timestamp —
        # verify which clock is intended.
        print("DUPLICATION P", message.data.get('entry', [{}])[0].get('changes', [{}])[0].get('value', {}).get('messages', [{}])[0].get('timestamp', 0) , last_ts)
        if float(message.data.get('entry', [{}])[0].get('changes', [{}])[0].get('value', {}).get('messages', [{}])[0].get('timestamp', 0)) < last_ts - 120:
            return
        self.processed_messages.add((message.id, time.perf_counter()))

    # Mark message as read
    message.mark_as_read()

    # Extract content and type
    content_type = message.type
    content = message.content

    print(f"message.content {content=} {content_type=} {message.data=}")

    try:
        if content_type == 'interactive':
            await self.handle_interactive(message)
        elif content_type == 'audio':
            await self.handle_audio_message(message)
        elif content_type in ['document', 'image', 'video']:
            response = await self.handle_media_message(message)
            self.save_reply(message, response)
        elif content_type == 'text':
            # "menu" is the only direct text command; everything else goes
            # to the agent via helper_text.
            if content.lower() == "menu":
                self.whc.messenger.send_button(
                    recipient_id=self.whc.progress_messenger0.recipient_phone,
                    button=self.buttons[content.lower()]
                )
            else:
                await self.helper_text(message)
        else:
            message.reply("Unsupported message type")
    #except Exception as e:
    #    logging.error(f"Message handling error: {str(e)}")
    #   message.reply("❌ Error processing request")
    finally:
        # Cleanup old messages (keep 1 hour history)
        with self.message_lock:
            self._clean_processed_messages()
init_services()

Initialize Gmail and Calendar services

Source code in toolboxv2/mods/WhatsAppTb/client.py
281
282
283
284
285
286
287
288
289
def init_services(self):
    """Build the Gmail and Calendar API clients and reset pending actions.

    Requires self.credentials to already hold valid Google OAuth credentials.
    """
    from googleapiclient.discovery import build

    def make(api_name, version):
        # Both services share the same credential object.
        return build(api_name, version, credentials=self.credentials)

    self.gmail_service = make('gmail', 'v1')
    self.calendar_service = make('calendar', 'v3')
    self.pending_actions[self.whc.progress_messenger0.recipient_phone] = {}
load_credentials()

Load previously saved credentials if available

:return: Whether credentials were successfully loaded

Source code in toolboxv2/mods/WhatsAppTb/client.py
267
268
269
270
271
272
273
274
275
276
277
278
def load_credentials(self):
    """
    Load previously saved credentials if available

    :return: Whether credentials were successfully loaded
    """
    try:
        self.credentials = Credentials.from_authorized_user_file('token/google_token.json')
        self.init_services()
    except FileNotFoundError:
        # No saved token yet — caller must run the authorization flow.
        return False
    return True
run()

Start the WhatsApp assistant

Source code in toolboxv2/mods/WhatsAppTb/client.py
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
def run(self):
    """Start the WhatsApp assistant"""
    try:
        self.state = AssistantState.ONLINE
        # Send welcome message

        mas = self.whc.messenger.create_message(
            content="Digital Assistant is online! Send /help for available commands.",to=self.whc.progress_messenger0.recipient_phone,
        ).send(sender=0)
        mas_id = mas.get("messages", [{}])[0].get("id")
        print(mas_id)

    except Exception as e:
        logging.error(f"Assistant error: {str(e)}")
        self.state = AssistantState.OFFLINE
        raise
save_credentials()

Save the obtained credentials to a file for future use

Source code in toolboxv2/mods/WhatsAppTb/client.py
256
257
258
259
260
261
262
263
264
def save_credentials(self):
    """Persist the current OAuth credentials to token/google_token.json.

    Creates the token directory when missing so a later load_credentials()
    can restore the session.
    """
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists() + os.makedirs() pair.
    os.makedirs('token', exist_ok=True)

    with open('token/google_token.json', 'w') as token_file:
        token_file.write(self.credentials.to_json())
search_documents(message) async

Initiate document search workflow

Source code in toolboxv2/mods/WhatsAppTb/client.py
1377
1378
1379
1380
1381
1382
1383
1384
async def search_documents(self, message):
    """Start the document-search workflow and prompt for a query."""
    phone = self.whc.progress_messenger0.recipient_phone
    self.pending_actions[phone] = {'type': 'search', 'step': 'awaiting_query'}
    return {
        'type': 'quick_reply',
        'text': '🔍 What are you looking for?',
        'options': {'cancel': '❌ Cancel Search'},
    }
send_email(to, subject, body)

Actual email sending function to be called by agent

Source code in toolboxv2/mods/WhatsAppTb/client.py
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
def send_email(self, to, subject, body):
    """Send a plain-text email via the Gmail API.

    Returns True on success, False when the Gmail service is not configured.
    """
    if not self.gmail_service:
        return False

    mime_msg = MIMEText(body)
    mime_msg['to'] = to
    mime_msg['subject'] = subject

    # Gmail expects the RFC 2822 message base64url-encoded under 'raw'.
    raw_payload = base64.urlsafe_b64encode(mime_msg.as_bytes()).decode()
    self.gmail_service.users().messages().send(
        userId='me',
        body={'raw': raw_payload},
    ).execute()
    return True
send_structured_response(result)

Send complex responses using appropriate WhatsApp features

Source code in toolboxv2/mods/WhatsAppTb/client.py
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
def send_structured_response(self, result: dict):
    """Render a structured handler result with the matching WhatsApp feature.

    Supports result['type'] of 'list' (interactive list), 'quick_reply'
    (single-section option list) and 'media' (image or document).
    """
    recipient = self.whc.progress_messenger0.recipient_phone
    kind = result['type']

    if kind == 'list':
        self.whc.messenger.send_button(
            recipient_id=recipient,
            button={
                'header': result.get('header', ''),
                'body': result.get('body', ''),
                'footer': result.get('footer', ''),
                'action': {'button': 'Action', 'sections': result['sections']},
            },
        )
    elif kind == 'quick_reply':
        # WhatsApp row titles are limited, hence the 23-char truncation.
        rows = [{'id': key, 'title': label[:23]} for key, label in result['options'].items()]
        self.whc.messenger.send_button(
            recipient_id=recipient,
            button={
                'header': "Quick reply",
                'body': result['text'],
                'footer': '',
                'action': {'button': 'Action', 'sections': [{'title': 'View', 'rows': rows}]},
            },
        )
    elif kind == 'media':
        if result['media_type'] == 'image':
            self.whc.messenger.send_image(
                image=result['url'],
                recipient_id=recipient,
                caption=result.get('caption', ''),
            )
        elif result['media_type'] == 'document':
            self.whc.messenger.send_document(
                document=result['url'],
                recipient_id=recipient,
                caption=result.get('caption', ''),
            )
setup_interaction_buttons()

Define WhatsApp interaction buttons for different functionalities

Source code in toolboxv2/mods/WhatsAppTb/client.py
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
def setup_interaction_buttons(self):
    """Build the WhatsApp button map: the main menu plus one submenu per area."""
    menu_rows = [
        {'id': 'agent', 'title': 'Agent Controls', 'description': 'Manage your AI assistant'},
        {'id': 'email', 'title': 'Email Management', 'description': 'Handle your emails'},
        {'id': 'calendar', 'title': 'Calendar', 'description': 'Manage your schedule'},
        {'id': 'docs', 'title': 'Documents', 'description': 'Handle documents'},
        {'id': 'system', 'title': 'System', 'description': 'System controls and metrics'},
    ]
    main_menu = {
        'header': 'Digital Assistant',
        'body': 'Please select an option:',
        'footer': '-- + --',
        'action': {
            'button': 'Menu',
            'sections': [{'title': 'Main Functions', 'rows': menu_rows}],
        },
    }
    self.buttons = {
        'menu': main_menu,
        'agent': self._create_agent_controls_buttons(),
        'email': self._create_email_controls_buttons(),
        'calendar': self._create_calendar_controls_buttons(),
        'docs': self._create_docs_controls_buttons(),
        'system': self._create_system_controls_buttons(),
    }
setup_progress_messengers()

Initialize progress messengers for different types of tasks

Source code in toolboxv2/mods/WhatsAppTb/client.py
291
292
293
294
295
296
297
def setup_progress_messengers(self):
    """Map task categories to their dedicated WhatsApp progress messengers."""
    whc = self.whc
    pairs = (
        ('task', whc.progress_messenger0),
        ('email', whc.progress_messenger1),
        ('calendar', whc.progress_messenger2),
    )
    self.progress_messengers = dict(pairs)
show_task_stack(*a) async

Display current task stack

Source code in toolboxv2/mods/WhatsAppTb/client.py
1504
1505
1506
1507
1508
1509
async def show_task_stack(self, *a):
    """Render the agent's current task stack, or a placeholder when empty."""
    if not (self.agent and len(self.agent.taskstack.tasks) > 0):
        return "No tasks in stack"
    lines = [f"Task {t.id}: {t.description}" for t in self.agent.taskstack.tasks]
    return self.agent.mini_task("\n".join(lines), "system", "Format to nice and clean whatsapp format")
show_today_events(message) async

Show today's calendar events

Source code in toolboxv2/mods/WhatsAppTb/client.py
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
async def show_today_events(self, message):
    """Show today's calendar events (now until next midnight).

    Returns a formatted event list, or a warning string when the Calendar
    service is not configured.
    """
    if not self.calendar_service:
        # Bug fix: the original called the non-existent message.replay() and
        # then fell through to use the missing service; return early instead,
        # matching show_upcoming_events.
        return "⚠️ Calendar service not configured"

    now = datetime.utcnow().isoformat() + 'Z'
    # Upper bound: the start of the next day.
    end_of_day = (datetime.now() + timedelta(days=1)).replace(
        hour=0, minute=0, second=0).isoformat() + 'Z'

    events_result = self.calendar_service.events().list(
        calendarId='primary',
        timeMin=now,
        timeMax=end_of_day,
        singleEvents=True,
        orderBy='startTime'
    ).execute()

    events = events_result.get('items', [])
    return self._format_calendar_response(events, "Today's Events")
show_upcoming_events(message) async

Show upcoming events with interactive support

Source code in toolboxv2/mods/WhatsAppTb/client.py
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
async def show_upcoming_events(self, message):
    """List up to 10 events in the next 7 days, formatted for WhatsApp."""
    if not self.calendar_service:
        return "⚠️ Calendar service not configured"

    try:
        window_start = datetime.utcnow().isoformat() + 'Z'
        window_end = (datetime.now() + timedelta(days=7)).isoformat() + 'Z'

        listing = self.calendar_service.events().list(
            calendarId='primary',
            timeMin=window_start,
            timeMax=window_end,
            singleEvents=True,
            orderBy='startTime',
            maxResults=10,
        ).execute()

        return self._format_calendar_response(listing.get('items', []), "Upcoming Events")
    except Exception as e:
        return f"⚠️ Error fetching events: {str(e)}"
start_agent(*a) async

Start the agent in background mode

Source code in toolboxv2/mods/WhatsAppTb/client.py
1490
1491
1492
1493
1494
1495
async def start_agent(self, *a):
    """Launch the agent's background loop; True if an agent is configured."""
    if not self.agent:
        return False
    self.agent.run_in_background()
    return True
start_document_upload(message) async

Initiate document upload workflow

Source code in toolboxv2/mods/WhatsAppTb/client.py
1368
1369
1370
1371
1372
1373
1374
1375
async def start_document_upload(self, message):
    """Start the document-upload workflow and prompt for the file."""
    phone = self.whc.progress_messenger0.recipient_phone
    self.pending_actions[phone] = {'type': 'document', 'step': 'awaiting_file'}
    return {
        'type': 'quick_reply',
        'text': '📤 Send me the file you want to upload',
        'options': {'cancel': '❌ Cancel Upload'},
    }
start_email_compose(message) async

Enhanced email composition workflow

Source code in toolboxv2/mods/WhatsAppTb/client.py
888
889
890
891
892
893
894
895
896
897
898
899
async def start_email_compose(self, message):
    """Start the multi-step email composition workflow (first step: subject)."""
    phone = self.whc.progress_messenger0.recipient_phone
    self.pending_actions[phone] = {
        'type': 'compose_email',
        'step': 'subject',
        'draft': {'attachments': []},
    }
    return {
        'type': 'quick_reply',
        'text': "📝 Let's compose an email\n\nSubject:",
        'options': {'cancel': '❌ Cancel Composition'},
    }
start_event_create(message) async

Initiate event creation workflow

Source code in toolboxv2/mods/WhatsAppTb/client.py
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
async def start_event_create(self, message):
    """Start the event-creation workflow (first step: title)."""
    phone = self.whc.progress_messenger0.recipient_phone
    self.pending_actions[phone] = {
        'type': 'create_event',
        'step': 'title',
        'event_data': {},
    }
    return {
        'type': 'quick_reply',
        'text': "Let's create an event! What's the title?",
        'options': {'cancel': '❌ Cancel'},
    }
stop_agent(*b) async

Stop the currently running agent

Source code in toolboxv2/mods/WhatsAppTb/client.py
1497
1498
1499
1500
1501
1502
async def stop_agent(self, *b):
    """Stop a running agent; True if an agent is configured."""
    if not self.agent:
        return False
    self.agent.stop()
    return True
system_task(message) async

Initiate a system task prompt workflow

Source code in toolboxv2/mods/WhatsAppTb/client.py
745
746
747
748
749
750
751
752
753
754
755
async def system_task(self, message):
    """Start the system-task workflow: prompt the user for an ISAA query.

    (The original docstring was a copy-paste from the email-search handler.)
    """
    phone = self.whc.progress_messenger0.recipient_phone
    self.pending_actions[phone] = {
        'type': 'system',
        'step': 'await_query',
    }
    return {
        'type': 'quick_reply',
        'text': "Now prompt the 🧠ISAA-System 📝",
        'options': {'cancel': '❌ Cancel Search'},
    }

server

AppManager
Source code in toolboxv2/mods/WhatsAppTb/server.py
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
class AppManager(metaclass=Singleton):
    """Manage multiple WhatsApp app instances.

    Responsibilities: automatic port assignment within a configured range,
    per-instance worker threads with bounded retry/restart, forwarding of
    incoming messages/events to registered callbacks, and a NiceGUI
    management UI. Singleton metaclass: one manager per process.
    """

    # Static pepper mixed into the one-way hash of each instance's API key.
    pepper = "pepper0"

    def __init__(self, start_port: int = 8000, port_range: int = 10, em=None):
        """Set up bookkeeping and connect to the EventManager.

        Args:
            start_port: First port in the range handed out to instances.
            port_range: Number of consecutive candidate ports.
            em: Optional EventManager module; resolved via get_app() when None.
        """
        self.instances: dict[str, dict] = {}
        self.start_port = start_port
        self.port_range = port_range
        self.threads: dict[str, Thread] = {}
        self.stop_events: dict[str, Event] = {}
        self.message_queue: asyncio.Queue = asyncio.Queue()
        self.last_messages: dict[str, datetime] = {}
        self.keys: dict[str, str] = {}
        self.forwarders: dict[str, dict] = {}
        # Hook invoked after an instance thread starts; replaced by
        # create_manager_ui() with the assistant starter callable.
        self.runner = lambda: None

        if em is None:
            from toolboxv2 import get_app
            em = get_app().get_mod("EventManager")
        from toolboxv2.mods import EventManager
        self.event_manager: EventManager = em.get_manager()

        # Register graceful-shutdown handlers. signal.signal() may only be
        # called from the main thread, hence the guard and broad except.
        try:
            if threading.current_thread() is threading.main_thread():
                signal.signal(signal.SIGINT, self.signal_handler)
                signal.signal(signal.SIGTERM, self.signal_handler)
        except Exception:
            pass

    def offline(self, instance_id):
        """Return a callable that clears the instance's message forwarder."""

        def mark_as_offline():
            # Leaves the 'send' key present but None; on_message treats this
            # the same as "no forwarder registered".
            self.forwarders[instance_id]['send'] = None
            return 'done'

        return mark_as_offline

    def online(self, instance_id):
        """Return ``(app, set_callbacks)`` for the given instance.

        The first element is the instance's WhatsApp app object; the second
        is a setter registering the message ('send') and event ('sende')
        forwarder callbacks.
        """

        def mark_as_online():
            return self.instances[instance_id]['app']

        def set_callbacks(callback, e_callback=None):
            if callback is not None:
                self.forwarders[instance_id]['send'] = callback
            if e_callback is not None:
                self.forwarders[instance_id]['sende'] = e_callback

        return mark_as_online(), set_callbacks

    def get_next_available_port(self) -> int:
        """Find the next available port in the range.

        Raises:
            RuntimeError: If every port in the range is already in use.
        """
        used_ports = {instance['port'] for instance in self.instances.values()}
        for port in range(self.start_port, self.start_port + self.port_range):
            if port not in used_ports:
                return port
        raise RuntimeError("No available ports in range")

    def add_instance(self, instance_id: str, **kwargs):
        """
        Add a new app instance to the manager with automatic port assignment.

        Args:
            instance_id: Unique identifier for the new instance.
            **kwargs: Passed through to the WhatsApp constructor; expected to
                contain 'phone_number_id' (dict with a 'key' entry).

        Raises:
            ValueError: If an instance with this id already exists.
        """
        if instance_id in self.instances:
            raise ValueError(f"Instance {instance_id} already exists")

        port = self.get_next_available_port()
        app_instance = WhatsApp(**kwargs)

        self.instances[instance_id] = {
            'app': app_instance,
            'port': port,
            'kwargs': kwargs,
            'phone_number_id': kwargs.get("phone_number_id", {}),
            'retry_count': 0,
            'max_retries': 3,
            'retry_delay': 5
        }
        # Store only a peppered one-way hash of the API key, never the key.
        self.keys[instance_id] = Code.one_way_hash(kwargs.get("phone_number_id", {}).get("key"), "WhatsappAppManager",
                                                   self.pepper)
        self.forwarders[instance_id] = {}

        # Set up message handlers that route back into this manager with the
        # instance id bound via closure.
        @app_instance.on_message
        async def message_handler(message):
            await self.on_message(instance_id, message)

        @app_instance.on_event
        async def event_handler(event):
            await self.on_event(instance_id, event)

        @app_instance.on_verification
        async def verification_handler(verification):
            await self.on_verification(instance_id, verification)

        # Create stop event for this instance.
        self.stop_events[instance_id] = Event()

    def run_instance(self, instance_id: str):
        """Run a single instance in a separate thread with error handling and automatic restart."""
        instance_data = self.instances[instance_id]
        stop_event = self.stop_events[instance_id]

        # app.run() blocks for the lifetime of the server, so the stop event
        # is only re-checked after run() returns or raises.
        while not stop_event.is_set():
            try:
                logger.info(f"Starting instance {instance_id} on port {instance_data['port']}")
                instance_data['app'].run(host='0.0.0.0', port=instance_data['port'])

            except Exception as e:
                logger.error(f"Error in instance {instance_id}: {str(e)}")
                instance_data['retry_count'] += 1

                # NOTE: retry_count is never reset after a clean run, so the
                # budget applies to the instance's whole lifetime.
                if instance_data['retry_count'] > instance_data['max_retries']:
                    logger.error(f"Max retries exceeded for instance {instance_id}")
                    break

                logger.info(f"Restarting instance {instance_id} in {instance_data['retry_delay']} seconds...")
                time.sleep(instance_data['retry_delay'])

                # Recreate the app object so the next loop iteration starts fresh.
                instance_data['app'] = WhatsApp(**instance_data['kwargs'])
                continue

    async def on_message(self, instance_id: str, message: Message):
        """Handle and forward incoming messages.

        Fix: offline() sets forwarders[id]['send'] to None rather than
        deleting the key, so the previous membership-only test could await
        None and raise TypeError. Guard on the value (mirrors on_event).
        """
        logger.info(f"Message from instance {instance_id}: {message}")
        if instance_id in self.forwarders and self.forwarders[instance_id].get('send') is not None:
            await self.forwarders[instance_id]['send'](message)

    async def on_event(self, instance_id: str, event):
        """Handle events by invoking the instance's 'sende' callback, if set."""
        logger.info(f"Event from instance {instance_id}: {event}")
        if instance_id in self.forwarders and 'sende' in self.forwarders[instance_id] and self.forwarders[instance_id]['sende'] is not None:
            self.forwarders[instance_id]['sende'](event)

    async def on_verification(self, instance_id: str, verification):
        """Handle verification events (currently log-only)."""
        logger.info(f"Verification from instance {instance_id}: {verification}")

    def run_all_instances(self):
        """Start all registered instances, one daemon thread each."""
        for instance_id in self.instances:
            thread = Thread(
                target=self.run_instance,
                args=(instance_id,),
                daemon=True,
                name=f"WhatsApp-{instance_id}"
            )
            self.threads[instance_id] = thread
            thread.start()

    def signal_handler(self, signum, frame):
        """Handle shutdown signals gracefully, then exit the process."""
        logger.info("Shutdown signal received, stopping all instances...")
        self.stop_all_instances()
        sys.exit(0)

    def stop_all_instances(self):
        """Stop all running instances gracefully (bounded join per thread)."""
        for instance_id in self.stop_events:
            self.stop_events[instance_id].set()

        for thread in self.threads.values():
            thread.join(timeout=5)

    def create_manager_ui(self, start_assistant):
        """Enhanced WhatsApp Manager UI with instance configuration controls.

        Args:
            start_assistant: Callable stored as self.runner and invoked after
                an instance thread is started.

        Returns:
            A zero-argument function that builds the NiceGUI page.
        """
        self.runner = start_assistant
        def ui_manager():
            # Wrap on_message so the UI can show per-instance last activity.
            original_on_message = self.on_message

            async def enhanced_on_message(instance_id: str, message):
                self.last_messages[instance_id] = datetime.now()
                await original_on_message(instance_id, message)

            self.on_message = enhanced_on_message

            def create_instance_card(instance_id: str):
                """Interactive instance control card"""
                config = self.instances[instance_id]
                with ui.card().classes('w-full p-4 mb-4 bg-gray-50 dark:bg-gray-800').style("background-color: var(--background-color) !important"):
                    # Header Section
                    with ui.row().classes('w-full justify-between items-center'):
                        ui.label(f'📱 {instance_id}').classes('text-xl font-bold')

                        # Status Indicator
                        ui.label().bind_text_from(
                            self.threads, instance_id,
                            lambda x: 'Running' if x and x.is_alive() else 'Stopped'
                        )

                    # Configuration Display
                    with ui.grid(columns=2).classes('w-full mt-4 gap-2'):

                        ui.label('port:').classes('font-bold')
                        ui.label(config['port'])

                        ui.label('Last Activity:').classes('font-bold')
                        ui.label().bind_text_from(
                            self.last_messages, instance_id,
                            lambda x: x.strftime("%Y-%m-%d %H:%M:%S") if x else 'Never'
                        )

                    # Action Controls
                    with ui.row().classes('w-full mt-4 gap-2'):
                        with ui.button(icon='settings', on_click=lambda: edit_dialog.open()).props('flat'):
                            ui.tooltip('Configure')

                        with ui.button(icon='refresh', color='orange',
                                       on_click=lambda: self.restart_instance(instance_id)):
                            ui.tooltip('Restart')

                        with ui.button(icon='stop', color='red',
                                       on_click=lambda: self.stop_instance(instance_id)):
                            ui.tooltip('Stop')

                    # Edit Configuration Dialog
                    with ui.dialog() as edit_dialog, ui.card().classes('p-4 gap-4'):
                        new_key = ui.input('API Key', value=config['phone_number_id'].get('key', ''))
                        new_number = ui.input('Phone Number', value=config['phone_number_id'].get('number', ''))

                        with ui.row().classes('w-full justify-end'):
                            ui.button('Cancel', on_click=edit_dialog.close)
                            ui.button('Save', color='primary', on_click=lambda: (
                                self.update_instance_config(
                                    instance_id,
                                    new_key.value,
                                    new_number.value
                                ),
                                edit_dialog.close()
                            ))

            # Main UI Layout
            with ui.column().classes('w-full max-w-4xl mx-auto p-4'):
                ui.label('WhatsApp Instance Manager').classes('text-2xl font-bold mb-6')

                # Add Instance Section
                with ui.expansion('➕ Add New Instance', icon='add').classes('w-full'):
                    with ui.card().classes('w-full p-4 mt-2'):
                        instance_id = ui.input('Instance ID').classes('w-full')
                        token = ui.input('API Token').classes('w-full')
                        phone_key = ui.input('Phone Number Key').classes('w-full')
                        phone_number = ui.input('Phone Number').classes('w-full')

                        with ui.row().classes('w-full justify-end gap-2'):
                            ui.button('Clear', on_click=lambda: (
                                instance_id.set_value(''),
                                token.set_value(''),
                                phone_key.set_value(''),
                                phone_number.set_value('')
                            ))
                            ui.button('Create', color='positive', on_click=lambda: (
                                self.add_update_instance(
                                    instance_id.value,
                                    token.value,
                                    phone_key.value,
                                    phone_number.value
                                ),
                                instances_container.refresh()
                            ))

                # Instances Display
                instances_container = ui.column().classes('w-full')
                with instances_container:
                    for instance_id in self.instances:
                        create_instance_card(instance_id)

        return ui_manager

    def add_update_instance(self, instance_id, token, phone_key, phone_number):
        """Add or update instance configuration, then start the instance."""
        if instance_id in self.instances:
            self.stop_instance(instance_id)
            del self.instances[instance_id]

        self.add_instance(
            instance_id,
            token=token,
            phone_number_id={
                'key': phone_key,
                'number': phone_number
            },
            verify_token=os.getenv("WHATSAPP_VERIFY_TOKEN")
        )
        self.start_instance(instance_id)

    def update_instance_config(self, instance_id, new_key, new_number):
        """Update existing instance configuration and restart it."""
        if instance_id in self.instances:
            self.instances[instance_id]['phone_number_id'] = {
                'key': new_key,
                'number': new_number
            }
            self.restart_instance(instance_id)

    def restart_instance(self, instance_id):
        """Safe restart of instance."""
        self.stop_instance(instance_id)
        self.start_instance(instance_id)

    def stop_instance(self, instance_id):
        """Graceful stop of instance: signal, bounded join, forget thread."""
        if instance_id in self.threads:
            self.stop_events[instance_id].set()
            self.threads[instance_id].join(timeout=5)
            del self.threads[instance_id]

    def start_instance(self, instance_id):
        """Start instance thread with a fresh stop event."""
        print("Starting Istance")

        self.stop_events[instance_id] = threading.Event()
        self.threads[instance_id] = threading.Thread(
            target=self.run_instance,
            args=(instance_id,),
            daemon=True
        )
        self.threads[instance_id].start()
        print("Running starter", self.runner())
add_instance(instance_id, **kwargs)

Add a new app instance to the manager with automatic port assignment.

Source code in toolboxv2/mods/WhatsAppTb/server.py
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
def add_instance(self, instance_id: str, **kwargs):
    """
    Add a new app instance to the manager with automatic port assignment.

    Args:
        instance_id: Unique identifier for the new instance.
        **kwargs: Passed through to the WhatsApp constructor; expected to
            contain 'phone_number_id' (dict with a 'key' entry).

    Raises:
        ValueError: If an instance with this id already exists.
    """
    if instance_id in self.instances:
        raise ValueError(f"Instance {instance_id} already exists")

    port = self.get_next_available_port()
    app_instance = WhatsApp(**kwargs)

    self.instances[instance_id] = {
        'app': app_instance,
        'port': port,
        'kwargs': kwargs,
        'phone_number_id': kwargs.get("phone_number_id", {}),
        'retry_count': 0,
        'max_retries': 3,
        'retry_delay': 5
    }
    # Store only a peppered one-way hash of the API key, never the raw key.
    # NOTE(review): .get("key") may be None if 'phone_number_id' is missing
    # or incomplete — confirm Code.one_way_hash tolerates None input.
    self.keys[instance_id] = Code.one_way_hash(kwargs.get("phone_number_id", {}).get("key"), "WhatsappAppManager",
                                               self.pepper)
    self.forwarders[instance_id] = {}

    # Set up message handlers routing back into the manager with the
    # instance id captured by closure.
    @app_instance.on_message
    async def message_handler(message):
        await self.on_message(instance_id, message)

    @app_instance.on_event
    async def event_handler(event):
        await self.on_event(instance_id, event)

    @app_instance.on_verification
    async def verification_handler(verification):
        await self.on_verification(instance_id, verification)

    # Create stop event for this instance.
    self.stop_events[instance_id] = Event()
add_update_instance(instance_id, token, phone_key, phone_number)

Add or update instance configuration

Source code in toolboxv2/mods/WhatsAppTb/server.py
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
def add_update_instance(self, instance_id, token, phone_key, phone_number):
    """Register an instance, replacing any existing one with the same id.

    An existing instance is stopped and dropped first; the new one is
    registered with the given token/phone config and started immediately.
    """
    existing = instance_id in self.instances
    if existing:
        self.stop_instance(instance_id)
        del self.instances[instance_id]

    phone_config = {
        'key': phone_key,
        'number': phone_number
    }
    self.add_instance(
        instance_id,
        token=token,
        phone_number_id=phone_config,
        verify_token=os.getenv("WHATSAPP_VERIFY_TOKEN")
    )
    self.start_instance(instance_id)
create_manager_ui(start_assistant)

Enhanced WhatsApp Manager UI with instance configuration controls

Source code in toolboxv2/mods/WhatsAppTb/server.py
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
def create_manager_ui(self, start_assistant):
    """Enhanced WhatsApp Manager UI with instance configuration controls.

    Args:
        start_assistant: Callable stored as self.runner; invoked after an
            instance thread is started.

    Returns:
        A zero-argument page builder for NiceGUI.
    """
    self.runner = start_assistant
    def ui_manager():
        # Wrap on_message so the UI can display per-instance last activity.
        original_on_message = self.on_message

        async def enhanced_on_message(instance_id: str, message):
            self.last_messages[instance_id] = datetime.now()
            await original_on_message(instance_id, message)

        self.on_message = enhanced_on_message

        def create_instance_card(instance_id: str):
            """Interactive instance control card"""
            config = self.instances[instance_id]
            with ui.card().classes('w-full p-4 mb-4 bg-gray-50 dark:bg-gray-800').style("background-color: var(--background-color) !important"):
                # Header Section
                with ui.row().classes('w-full justify-between items-center'):
                    ui.label(f'📱 {instance_id}').classes('text-xl font-bold')

                    # Status Indicator: re-reads the thread to show liveness.
                    ui.label().bind_text_from(
                        self.threads, instance_id,
                        lambda x: 'Running' if x and x.is_alive() else 'Stopped'
                    )

                # Configuration Display
                with ui.grid(columns=2).classes('w-full mt-4 gap-2'):

                    ui.label('port:').classes('font-bold')
                    ui.label(config['port'])

                    ui.label('Last Activity:').classes('font-bold')
                    ui.label().bind_text_from(
                        self.last_messages, instance_id,
                        lambda x: x.strftime("%Y-%m-%d %H:%M:%S") if x else 'Never'
                    )

                # Action Controls
                with ui.row().classes('w-full mt-4 gap-2'):
                    with ui.button(icon='settings', on_click=lambda: edit_dialog.open()).props('flat'):
                        ui.tooltip('Configure')

                    with ui.button(icon='refresh', color='orange',
                                   on_click=lambda: self.restart_instance(instance_id)):
                        ui.tooltip('Restart')

                    with ui.button(icon='stop', color='red',
                                   on_click=lambda: self.stop_instance(instance_id)):
                        ui.tooltip('Stop')

                # Edit Configuration Dialog
                with ui.dialog() as edit_dialog, ui.card().classes('p-4 gap-4'):
                    new_key = ui.input('API Key', value=config['phone_number_id'].get('key', ''))
                    new_number = ui.input('Phone Number', value=config['phone_number_id'].get('number', ''))

                    with ui.row().classes('w-full justify-end'):
                        ui.button('Cancel', on_click=edit_dialog.close)
                        ui.button('Save', color='primary', on_click=lambda: (
                            self.update_instance_config(
                                instance_id,
                                new_key.value,
                                new_number.value
                            ),
                            edit_dialog.close()
                        ))

        # Main UI Layout
        with ui.column().classes('w-full max-w-4xl mx-auto p-4'):
            ui.label('WhatsApp Instance Manager').classes('text-2xl font-bold mb-6')

            # Add Instance Section
            with ui.expansion('➕ Add New Instance', icon='add').classes('w-full'):
                with ui.card().classes('w-full p-4 mt-2'):
                    instance_id = ui.input('Instance ID').classes('w-full')
                    token = ui.input('API Token').classes('w-full')
                    phone_key = ui.input('Phone Number Key').classes('w-full')
                    phone_number = ui.input('Phone Number').classes('w-full')

                    with ui.row().classes('w-full justify-end gap-2'):
                        ui.button('Clear', on_click=lambda: (
                            instance_id.set_value(''),
                            token.set_value(''),
                            phone_key.set_value(''),
                            phone_number.set_value('')
                        ))
                        # NOTE(review): instances_container is a plain
                        # ui.column; .refresh() exists only on @ui.refreshable
                        # targets — verify against the NiceGUI version in use.
                        ui.button('Create', color='positive', on_click=lambda: (
                            self.add_update_instance(
                                instance_id.value,
                                token.value,
                                phone_key.value,
                                phone_number.value
                            ),
                            instances_container.refresh()
                        ))

            # Instances Display
            instances_container = ui.column().classes('w-full')
            with instances_container:
                for instance_id in self.instances:
                    create_instance_card(instance_id)

    return ui_manager
get_next_available_port()

Find the next available port in the range.

Source code in toolboxv2/mods/WhatsAppTb/server.py
78
79
80
81
82
83
84
def get_next_available_port(self) -> int:
    """Return the first free port in [start_port, start_port + port_range).

    Raises:
        RuntimeError: If every port in the range is already taken.
    """
    taken = {entry['port'] for entry in self.instances.values()}
    candidates = range(self.start_port, self.start_port + self.port_range)
    free = next((p for p in candidates if p not in taken), None)
    if free is None:
        raise RuntimeError("No available ports in range")
    return free
on_event(instance_id, event) async

Handle events.

Source code in toolboxv2/mods/WhatsAppTb/server.py
156
157
158
159
160
async def on_event(self, instance_id: str, event):
    """Log an event and forward it to the instance's 'sende' callback.

    The callback is invoked only when it is registered and non-None.
    """
    logger.info(f"Event from instance {instance_id}: {event}")
    callback = self.forwarders.get(instance_id, {}).get('sende')
    if callback is not None:
        callback(event)
on_message(instance_id, message) async

Handle and forward incoming messages.

Source code in toolboxv2/mods/WhatsAppTb/server.py
150
151
152
153
154
async def on_message(self, instance_id: str, message: Message):
    """Handle and forward incoming messages.

    Fix: offline() sets forwarders[id]['send'] to None instead of removing
    the key, so the previous membership-only test could await None and
    raise TypeError. Guard on the value, mirroring on_event().
    """
    logger.info(f"Message from instance {instance_id}: {message}")
    if instance_id in self.forwarders and self.forwarders[instance_id].get('send') is not None:
        await self.forwarders[instance_id]['send'](message)
on_verification(instance_id, verification) async

Handle verification events.

Source code in toolboxv2/mods/WhatsAppTb/server.py
162
163
164
async def on_verification(self, instance_id: str, verification):
    """Handle verification events.

    Currently log-only: no forwarder is invoked for verifications.
    """
    logger.info(f"Verification from instance {instance_id}: {verification}")
restart_instance(instance_id)

Safe restart of instance

Source code in toolboxv2/mods/WhatsAppTb/server.py
327
328
329
330
def restart_instance(self, instance_id):
    """Safely restart an instance: tear down its thread, then start anew."""
    # Stop must run first so start_instance gets a fresh stop event.
    for step in (self.stop_instance, self.start_instance):
        step(instance_id)
run_all_instances()

Start all instances in separate daemon threads.

Source code in toolboxv2/mods/WhatsAppTb/server.py
166
167
168
169
170
171
172
173
174
175
176
177
178
179
def run_all_instances(self):
    """Spawn one named daemon thread per registered instance and start it."""
    for instance_id in self.instances:
        worker = Thread(
            target=self.run_instance,
            args=(instance_id,),
            daemon=True,
            name=f"WhatsApp-{instance_id}",
        )
        self.threads[instance_id] = worker
        worker.start()
run_instance(instance_id)

Run a single instance in a separate thread with error handling and automatic restart.

Source code in toolboxv2/mods/WhatsAppTb/server.py
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
def run_instance(self, instance_id: str):
    """Run a single instance in a separate thread with error handling and automatic restart."""
    instance_data = self.instances[instance_id]
    stop_event = self.stop_events[instance_id]

    # app.run() blocks for the server's lifetime, so the stop event is only
    # re-checked after run() returns or raises.
    while not stop_event.is_set():
        try:
            logger.info(f"Starting instance {instance_id} on port {instance_data['port']}")
            instance_data['app'].run(host='0.0.0.0', port=instance_data['port'])

        except Exception as e:
            logger.error(f"Error in instance {instance_id}: {str(e)}")
            instance_data['retry_count'] += 1

            # NOTE(review): retry_count is never reset after a clean run, so
            # the retry budget covers the instance's whole lifetime.
            if instance_data['retry_count'] > instance_data['max_retries']:
                logger.error(f"Max retries exceeded for instance {instance_id}")
                break

            logger.info(f"Restarting instance {instance_id} in {instance_data['retry_delay']} seconds...")
            time.sleep(instance_data['retry_delay'])

            # Recreate the app object so the next iteration starts fresh.
            instance_data['app'] = WhatsApp(**instance_data['kwargs'])
            continue
signal_handler(signum, frame)

Handle shutdown signals gracefully.

Source code in toolboxv2/mods/WhatsAppTb/server.py
181
182
183
184
185
def signal_handler(self, signum, frame):
    """Handle shutdown signals gracefully.

    Stops every instance, then terminates the process with exit code 0.

    Args:
        signum: Received signal number (standard handler contract; unused).
        frame: Current stack frame (unused).
    """
    logger.info("Shutdown signal received, stopping all instances...")
    self.stop_all_instances()
    sys.exit(0)
start_instance(instance_id)

Start instance thread

Source code in toolboxv2/mods/WhatsAppTb/server.py
339
340
341
342
343
344
345
346
347
348
349
350
def start_instance(self, instance_id):
    """Launch the instance's worker thread with a fresh stop event.

    Also invokes self.runner (the assistant starter hook) and prints its
    return value.
    """
    print("Starting Istance")

    fresh_event = threading.Event()
    worker = threading.Thread(
        target=self.run_instance,
        args=(instance_id,),
        daemon=True,
    )
    self.stop_events[instance_id] = fresh_event
    self.threads[instance_id] = worker
    worker.start()
    print("Running starter", self.runner())
stop_all_instances()

Stop all running instances gracefully.

Source code in toolboxv2/mods/WhatsAppTb/server.py
187
188
189
190
191
192
193
def stop_all_instances(self):
    """Signal every instance to stop, then wait (bounded) for each thread."""
    for event in self.stop_events.values():
        event.set()

    # Bounded join: a thread stuck in a blocking run() will be abandoned
    # after the timeout rather than hanging shutdown forever.
    for worker in self.threads.values():
        worker.join(timeout=5)
stop_instance(instance_id)

Graceful stop of instance

Source code in toolboxv2/mods/WhatsAppTb/server.py
332
333
334
335
336
337
def stop_instance(self, instance_id):
    """Gracefully stop one instance: signal, bounded join, forget thread."""
    worker = self.threads.get(instance_id)
    if worker is None:
        return
    self.stop_events[instance_id].set()
    worker.join(timeout=5)
    del self.threads[instance_id]
update_instance_config(instance_id, new_key, new_number)

Update existing instance configuration

Source code in toolboxv2/mods/WhatsAppTb/server.py
318
319
320
321
322
323
324
325
def update_instance_config(self, instance_id, new_key, new_number):
    """Replace an instance's phone_number_id config and restart it.

    Unknown instance ids are ignored silently.
    """
    if instance_id not in self.instances:
        return
    self.instances[instance_id]['phone_number_id'] = {
        'key': new_key,
        'number': new_number,
    }
    self.restart_instance(instance_id)

utils

ProgressMessenger
Source code in toolboxv2/mods/WhatsAppTb/utils.py
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
class ProgressMessenger:
    """Drive a WhatsApp progress/loading indicator via message reactions.

    Sends one initial message, then repeatedly reacts to it with emojis to
    visualize progress steps or a looping 'loading' animation.
    """

    def __init__(self, messenger, recipient_phone: str, max_steps: int = 5, emoji_set: list[str] = None, content=None):
        """Store messenger, target phone and indicator configuration.

        Args:
            messenger: Object providing create_message(...); presumably the
                WhatsApp messenger wrapper — confirm against callers.
            recipient_phone: Phone number the messages are sent to.
            max_steps: Number of progress steps for update_progress().
            emoji_set: Emojis cycled through as the indicator; defaults below.
            content: Optional extra text appended to the initial message.
        """
        self.messenger = messenger
        self.recipient_phone = recipient_phone
        self.max_steps = max_steps
        self.emoji_set = emoji_set or ["⬜", "⬛", "🟩", "🟨", "🟦"]
        self.message_id = None  # set by send_initial_message()
        self.content = content

    def send_initial_message(self, mode: str = "progress"):
        """
        Sends the initial message. Modes can be 'progress' or 'loading'.
        """
        if mode == "progress":
            emoji_legend = "\n".join(
                f"{emoji} - Step {i + 1}" for i, emoji in enumerate(self.emoji_set)
            )
            content = (
                "Progress is being updated in real-time!\n\n"
                "Legend:\n"
                f"{emoji_legend}\n\n"
                "Stay tuned for updates!"
            )
        elif mode == "loading":
            content = (
                "Loading in progress! 🌀\n"
                "The indicator will loop until work is done."
            )
        else:
            raise ValueError("Invalid mode. Use 'progress' or 'loading'.")

        if self.content is not None:
            content += '\n'+self.content
        message = self.messenger.create_message(content=content, to=self.recipient_phone)
        response = message.send(sender=0)
        # Remember the sent message id so later reactions can target it.
        self.message_id = response.get("messages", [{}])[0].get("id")
        logging.info(f"Initial message sent: {content}")
        return self.message_id

    def update_progress(self, step_flag: threading.Event):
        """
        Updates the reaction on the message to represent progress.

        Blocks: after reacting for each step it polls step_flag every 0.5s
        until the caller sets it, then clears the flag and advances.
        """
        if not self.message_id:
            raise ValueError("Message ID not found. Ensure the initial message is sent first.")
        message = self.messenger.create_message(id=self.message_id, to=self.recipient_phone)
        for step in range(self.max_steps):
            # Wrap around if max_steps exceeds the emoji set length.
            emoji = self.emoji_set[step % len(self.emoji_set)]
            message.react(emoji)
            logging.info(f"Progress updated: Step {step + 1}/{self.max_steps} with emoji {emoji}")
            while not step_flag.is_set():
                time.sleep(0.5)
            step_flag.clear()
        # Final acknowledgment
        message.react("👍")
        logging.info("Progress completed with final acknowledgment.")

    def update_loading(self, stop_flag: threading.Event):
        """
        Continuously updates the reaction to represent a looping 'loading' indicator.

        Runs until the caller sets stop_flag, then reacts with a checkmark
        and replies with a done message.
        """
        if not self.message_id:
            raise ValueError("Message ID not found. Ensure the initial message is sent first.")
        message = self.messenger.create_message(id=self.message_id, to=self.recipient_phone)
        step = 0
        while not stop_flag.is_set():
            emoji = self.emoji_set[step % len(self.emoji_set)]
            message.react(emoji)
            logging.info(f"Loading update: {emoji}")
            time.sleep(1)  # Faster updates for loading
            step += 1
        # Final acknowledgment
        message.react("✅")
        logging.info("Loading completed with final acknowledgment.")
        message.reply("✅Done✅")

    def start_progress_in_background(self, step_flag):
        """
        Starts the progress update in a separate thread.
        """
        threading.Thread(target=self.update_progress, args=(step_flag, ), daemon=True).start()

    def start_loading_in_background(self, stop_flag: threading.Event):
        """
        Starts the loading update in a separate thread.
        """
        threading.Thread(target=self.update_loading, args=(stop_flag,), daemon=True).start()
send_initial_message(mode='progress')

Sends the initial message. Modes can be 'progress' or 'loading'.

Source code in toolboxv2/mods/WhatsAppTb/utils.py
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
def send_initial_message(self, mode: str = "progress"):
    """Send the first message that reactions will later be attached to.

    :param mode: Either 'progress' (step-by-step legend) or 'loading'
        (looping indicator).
    :return: The WhatsApp message id of the sent message (also stored on
        ``self.message_id``).
    :raises ValueError: if *mode* is neither 'progress' nor 'loading'.
    """
    if mode == "loading":
        content = (
            "Loading in progress! 🌀\n"
            "The indicator will loop until work is done."
        )
    elif mode == "progress":
        # One legend line per emoji, in step order.
        legend_lines = []
        for idx, emoji in enumerate(self.emoji_set):
            legend_lines.append(f"{emoji} - Step {idx + 1}")
        content = (
            "Progress is being updated in real-time!\n\n"
            "Legend:\n"
            + "\n".join(legend_lines)
            + "\n\nStay tuned for updates!"
        )
    else:
        raise ValueError("Invalid mode. Use 'progress' or 'loading'.")

    if self.content is not None:
        content = content + '\n' + self.content
    outgoing = self.messenger.create_message(content=content, to=self.recipient_phone)
    response = outgoing.send(sender=0)
    # Remember the id so follow-up reactions target this message.
    self.message_id = response.get("messages", [{}])[0].get("id")
    logging.info(f"Initial message sent: {content}")
    return self.message_id
start_loading_in_background(stop_flag)

Starts the loading update in a separate thread.

Source code in toolboxv2/mods/WhatsAppTb/utils.py
 97
 98
 99
100
101
def start_loading_in_background(self, stop_flag: threading.Event):
    """Run `update_loading` on a daemon thread so the caller is not blocked."""
    worker = threading.Thread(target=self.update_loading, args=(stop_flag,), daemon=True)
    worker.start()
start_progress_in_background(step_flag)

Starts the progress update in a separate thread.

Source code in toolboxv2/mods/WhatsAppTb/utils.py
91
92
93
94
95
def start_progress_in_background(self, step_flag):
    """Run `update_progress` on a daemon thread so the caller is not blocked."""
    worker = threading.Thread(target=self.update_progress, args=(step_flag,), daemon=True)
    worker.start()
update_loading(stop_flag)

Continuously updates the reaction to represent a looping 'loading' indicator.

Source code in toolboxv2/mods/WhatsAppTb/utils.py
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
def update_loading(self, stop_flag: threading.Event):
    """Cycle the reaction through the emoji set as a 'loading' indicator.

    Loops until *stop_flag* is set, then reacts with a check mark and
    replies "Done" on the tracked message.

    Raises:
        ValueError: if no initial message has been sent yet.
    """
    if not self.message_id:
        raise ValueError("Message ID not found. Ensure the initial message is sent first.")
    msg = self.messenger.create_message(id=self.message_id, to=self.recipient_phone)
    tick = 0
    while not stop_flag.is_set():
        current = self.emoji_set[tick % len(self.emoji_set)]
        msg.react(current)
        logging.info(f"Loading update: {current}")
        time.sleep(1)  # Faster updates for loading
        tick += 1
    # Caller stopped the loop — acknowledge completion.
    msg.react("✅")
    logging.info("Loading completed with final acknowledgment.")
    msg.reply("✅Done✅")
update_progress(step_flag)

Updates the reaction on the message to represent progress.

Source code in toolboxv2/mods/WhatsAppTb/utils.py
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
def update_progress(self, step_flag: threading.Event):
    """React on the tracked message once per step, waiting on *step_flag*
    between steps; finishes with a thumbs-up reaction.

    Raises:
        ValueError: if no initial message has been sent yet.
    """
    if not self.message_id:
        raise ValueError("Message ID not found. Ensure the initial message is sent first.")
    msg = self.messenger.create_message(id=self.message_id, to=self.recipient_phone)
    for step in range(self.max_steps):
        current = self.emoji_set[step % len(self.emoji_set)]
        msg.react(current)
        logging.info(f"Progress updated: Step {step + 1}/{self.max_steps} with emoji {current}")
        # Block until the caller signals that this step is finished.
        while not step_flag.is_set():
            time.sleep(0.5)
        step_flag.clear()
    # All steps done — acknowledge completion.
    msg.react("👍")
    logging.info("Progress completed with final acknowledgment.")

cli_functions

replace_bracketed_content(text, replacements, inlist=False)

Ersetzt Inhalte in eckigen Klammern mit entsprechenden Werten aus einem Wörterbuch.

:param text: Der zu verarbeitende Text als String. :param replacements: Ein Wörterbuch mit Schlüssel-Wert-Paaren für die Ersetzung. :return: Den modifizierten Text.

Source code in toolboxv2/mods/cli_functions.py
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
def replace_bracketed_content(text, replacements, inlist=False):
    """
    Replace bracketed placeholders (``[key]``) with values from a mapping.

    :param text: The text to process.
    :param replacements: Mapping of placeholder names to replacement values.
    :param inlist: If False, return the substituted string. If True, split
        the text on spaces and replace each token that is exactly a known
        placeholder with its (non-stringified) value, returning the list.
    :return: The modified text (str), or the token list when *inlist* is True.
    """
    bracket_re = re.compile(r'\[([^\]]+)\]')

    if not inlist:
        # Substitute every known placeholder directly in the string;
        # unknown keys are left untouched.
        for key in bracket_re.findall(text):
            if key in replacements:
                text = text.replace(f'[{key}]', str(replacements[key]))
        return text

    # List mode: replace the token that actually *is* the placeholder.
    # (Previously the i-th match was written to the i-th word, which
    # corrupted the list whenever a placeholder was not the i-th word.)
    tokens = text.split(' ')
    for idx, token in enumerate(tokens):
        match = bracket_re.fullmatch(token)
        if match and match.group(1) in replacements:
            tokens[idx] = replacements[match.group(1)]
    return tokens

helper

create_invitation(app, username)

Creates a one-time invitation code for a user to link a new device.

Source code in toolboxv2/mods/helper.py
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
@export(mod_name=Name, name="create-invitation", test=False)
def create_invitation(app: App, username: str):
    """Creates a one-time invitation code for a user to link a new device."""
    print(f"Creating invitation for user '{username}'...")
    app.load_mod("CloudM")
    result = app.run_any(
        TBEF.CLOUDM_AUTHMANAGER.GET_INVITATION,
        get_results=True,
        username=username,
    )
    # Guard clause: report and bail out on failure.
    if not result.is_ok():
        print("❌ Error creating invitation:")
        result.print()
        return result
    print(f"✅ Invitation code for '{username}': {result.get()}")
    return result

create_user(app, username, email)

Creates a new user with a generated key pair.

Source code in toolboxv2/mods/helper.py
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
@export(mod_name=Name, name="create-user", test=False)
def create_user(app: App, username: str, email: str):
    """Creates a new user with a generated key pair."""
    print(f"Creating user '{username}' with email '{email}'...")
    app.load_mod("CloudM")
    # An invitation code is required for account creation; generate one here.
    invitation_res = app.run_any(TBEF.CLOUDM_AUTHMANAGER.GET_INVITATION,
                                 get_results=True,
                                 username=username)
    if invitation_res.is_error():
        print("❌ Error creating invitation:")
        invitation_res.print()
        return invitation_res

    account_kwargs = dict(
        get_results=True,
        username=username,
        email=email,
        invitation=invitation_res.get(),
        create=True,
    )
    result = app.run_any(TBEF.CLOUDM_AUTHMANAGER.CRATE_LOCAL_ACCOUNT, **account_kwargs)

    if not result.is_ok():
        print("❌ Error creating user:")
        result.print()
        return result
    print(f"✅ User '{username}' created successfully.")
    return result

delete_user_cli(app, username)

Deletes a user and all their associated data.

Source code in toolboxv2/mods/helper.py
85
86
87
88
89
90
91
92
93
94
95
96
@export(mod_name=Name, name="delete-user", test=False)
def delete_user_cli(app: App, username: str):
    """Deletes a user and all their associated data."""
    print(f"Attempting to delete user '{username}'...")
    app.load_mod("CloudM")
    result = app.run_any(TBEF.CLOUDM_AUTHMANAGER.DELETE_USER, get_results=True, username=username)

    if not result.is_ok():
        # NOTE(review): unlike sibling commands this prints info['help_text'];
        # presumably set by the auth manager — confirm before relying on it.
        print(f"❌ Error deleting user: {result.info.get('help_text')}")
        return result
    print(f"✅ User '{username}' has been deleted.")
    return result

init_system(app) async

Initializes the ToolBoxV2 system by creating the first administrative user. This is an interactive command.

Source code in toolboxv2/mods/helper.py
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
@export(mod_name=Name, name="init_system", test=False)
async def init_system(app: App):
    """
    Initializes the ToolBoxV2 system by creating the first administrative user.
    This is an interactive command.
    """
    print("--- ToolBoxV2 System Initialization ---")
    print("This will guide you through creating the first administrator account.")
    print("This account will have the highest permission level.\n")

    try:
        admin_name = input("Enter the administrator's username: ").strip()
        if not admin_name:
            print("Username cannot be empty.")
            return Result.default_user_error("Username cannot be empty.")

        admin_email = input(f"Enter the email for '{admin_name}': ").strip()
        if not admin_email:  # minimal validation; a regex check would be stricter
            print("Email cannot be empty.")
            return Result.default_user_error("Email cannot be empty.")

        print(f"\nCreating user '{admin_name}' with email '{admin_email}'...")
        # Delegate account creation (including initial key generation) to CloudM.
        reg_result = await app.a_run_any(TBEF.CLOUDM.REGISTER_INITIAL_LOOT_USER,
                                         user_name=admin_name,
                                         email=admin_email,
                                         get_results=True)

        if not reg_result.is_ok():
            print("\n❌ Error creating administrator account:")
            reg_result.print()
            return reg_result

        print("\n✅ Administrator account created successfully!")
        print("   A new cryptographic key pair has been generated for this user.")
        print("   Authentication is handled automatically using these keys.")
        print("   You can now use other CLI commands or log into the web UI.")
        return Result.ok("System initialized successfully.")

    except (KeyboardInterrupt, EOFError):
        print("\n\nInitialization cancelled by user.")
        return Result.default_user_error("Initialization cancelled.")
    except Exception as e:
        print(f"\nAn unexpected error occurred: {e}")
        return Result.default_internal_error(f"An unexpected error occurred: {e}")

list_users_cli(app)

Lists all registered users.

Source code in toolboxv2/mods/helper.py
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
@export(mod_name=Name, name="list-users", test=False)
def list_users_cli(app: App):
    """Lists all registered users."""
    print("Fetching user list...")
    app.load_mod("CloudM")
    result = app.run_any(TBEF.CLOUDM_AUTHMANAGER.LIST_USERS, get_results=True)

    if not result.is_ok():
        print("❌ Error listing users:")
        result.print()
        return result

    users = result.get()
    if not users:
        print("No users found.")
        return result

    divider = "------------------------"
    print("--- Registered Users ---")
    # Fixed-width columns for a simple table layout.
    print(f"{'Username':<25} {'Email':<30} {'Level'}")
    print(divider)
    for user in users:
        print(f"{user['username']:<25} {user['email']:<30} {user['level']}")
    print(divider)
    return result

Sends a magic login link to the user's registered email address.

Source code in toolboxv2/mods/helper.py
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
@export(mod_name=Name, name="send-magic-link", test=False)
def send_magic_link(app: App, username: str):
    """Sends a magic login link to the user's registered email address."""
    print(f"Sending magic link to user '{username}'...")
    app.load_mod("CloudM")
    result = app.run_any(
        TBEF.CLOUDM_AUTHMANAGER.GET_MAGIC_LINK_EMAIL,
        get_results=True,
        username=username,
    )
    if not result.is_ok():
        print("❌ Error sending magic link:")
        result.print()
        return result
    print(f"✅ Magic link sent successfully to the email address associated with '{username}'.")
    return result

isaa

CodingAgent

live
AsyncCodeDetector

Bases: NodeVisitor

Detect async code and top-level await

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
class AsyncCodeDetector(ast.NodeVisitor):
    """Detect async code and top-level await.

    NOTE(review): top-level detection walks a ``.parent`` attribute chain,
    which the ``ast`` module does not set by default — the caller is
    expected to have linked parents beforehand; confirm at call sites.
    """

    def __init__(self):
        self.has_async = False
        self.has_top_level_await = False
        self.await_nodes = []

    def visit_AsyncFunctionDef(self, node):
        # Any `async def` means the tree contains async code.
        self.has_async = True
        self.generic_visit(node)

    def visit_Await(self, node):
        self.has_async = True
        # Remember every await expression encountered.
        self.await_nodes.append(node)
        if not self._inside_function(node):
            self.has_top_level_await = True
        self.generic_visit(node)

    def _inside_function(self, node):
        """Walk the (caller-provided) parent chain; True when an enclosing
        function or async function is found, False when the chain ends."""
        ancestor = node
        while hasattr(ancestor, 'parent'):
            ancestor = ancestor.parent
            if isinstance(ancestor, ast.AsyncFunctionDef | ast.FunctionDef):
                return True
        return False
CargoRustInterface

Usage :

Create interface

cargo_interface = CargoRustInterface()

Set up new project

await cargo_interface.setup_project("hello_rust")

Add a dependency

await cargo_interface.add_dependency("serde", "1.0")

Write and run some code

code = """ fn main() { println!("Hello, Rust!"); } """ result = await cargo_interface.run_code(code)

Modify code

new_function = """ fn main() { println!("Modified Hello, Rust!"); } """ await cargo_interface.modify_code(new_function, "main()")

Build and test

await cargo_interface.build() await cargo_interface.test()

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
class CargoRustInterface:
    '''Usage :
# Create interface
cargo_interface = CargoRustInterface()

# Set up new project
await cargo_interface.setup_project("hello_rust")

# Add a dependency
await cargo_interface.add_dependency("serde", "1.0")

# Write and run some code
code = """
fn main() {
    println!("Hello, Rust!");
}
"""
result = await cargo_interface.run_code(code)

# Modify code
new_function = """
fn main() {
    println!("Modified Hello, Rust!");
}
"""
await cargo_interface.modify_code(new_function, "main()")

# Build and test
await cargo_interface.build()
await cargo_interface.test()

    '''
    def __init__(self, session_dir=None, auto_remove=True):
        """Initialize the Rust/Cargo interface"""
        self.auto_remove = auto_remove
        self._session_dir = session_dir or Path.home() / '.cargo_sessions'
        self._session_dir.mkdir(exist_ok=True)
        self.vfs = VirtualFileSystem(self._session_dir / 'virtual_fs')
        self.output_history = {}
        self._execution_count = 0
        self.current_project = None

    def reset(self):
        """Reset the interface state"""
        if self.auto_remove and self.current_project:
            shutil.rmtree(self.current_project, ignore_errors=True)
        self.output_history.clear()
        self._execution_count = 0
        self.current_project = None

    async def setup_project(self, name: str) -> str:
        """Set up a new Cargo project"""
        try:
            project_path = self.vfs.base_dir / name
            if project_path.exists():
                shutil.rmtree(project_path)

            result = subprocess.run(
                ['cargo', 'new', str(project_path)],
                capture_output=True,
                text=True, check=True
            )

            if result.returncode != 0:
                return f"Error creating project: {result.stderr}"

            self.current_project = project_path
            return f"Created new project at {project_path}"

        except Exception as e:
            return f"Failed to create project: {str(e)}"

    async def add_dependency(self, name: str, version: str | None = None) -> str:
        """Add a dependency to Cargo.toml"""
        if not self.current_project:
            return "No active project"

        try:
            cargo_toml = self.current_project / "Cargo.toml"
            if not cargo_toml.exists():
                return "Cargo.toml not found"

            cmd = ['cargo', 'add', name]
            if version:
                cmd.extend(['--vers', version])

            result = subprocess.run(
                cmd,
                cwd=self.current_project,
                capture_output=True,
                text=True,check=True
            )

            return result.stdout if result.returncode == 0 else f"Error: {result.stderr}"

        except Exception as e:
            return f"Failed to add dependency: {str(e)}"

    async def build(self, release: bool = False) -> str:
        """Build the project"""
        if not self.current_project:
            return "No active project"

        try:
            cmd = ['cargo', 'build']
            if release:
                cmd.append('--release')

            result = subprocess.run(
                cmd,
                cwd=self.current_project,
                capture_output=True,
                text=True
            )

            return result.stdout if result.returncode == 0 else f"Build error: {result.stderr}"

        except Exception as e:
            return f"Build failed: {str(e)}"

    async def test(self) -> str:
        """Run project tests"""
        if not self.current_project:
            return "No active project"

        try:
            result = subprocess.run(
                ['cargo', 'test'],
                cwd=self.current_project,
                capture_output=True,
                text=True, check=True
            )

            return result.stdout if result.returncode == 0 else f"Test error: {result.stderr}"

        except Exception as e:
            return f"Tests failed: {str(e)}"

    async def run_code(self, code: str) -> str:
        """Run Rust code"""
        if not self.current_project:
            return "No active project"

        try:
            # Write code to main.rs
            main_rs = self.current_project / "src" / "main.rs"
            with open(main_rs, 'w') as f:
                f.write(code)

            # Build and run
            build_result = subprocess.run(
                ['cargo', 'build'],
                cwd=self.current_project,
                capture_output=True,
                text=True
            )

            if build_result.returncode != 0:
                return f"Compilation error: {build_result.stderr}"

            run_result = subprocess.run(
                ['cargo', 'run'],
                cwd=self.current_project,
                capture_output=True,
                text=True
            )

            self._execution_count += 1
            output = {
                'code': code,
                'stdout': run_result.stdout,
                'stderr': run_result.stderr,
                'result': run_result.returncode == 0
            }
            self.output_history[self._execution_count] = output

            return run_result.stdout if run_result.returncode == 0 else f"Runtime error: {run_result.stderr}"

        except Exception as e:
            return f"Execution failed: {str(e)}"

    async def modify_code(self, code: str, object_name: str, file: str = "src/main.rs") -> str:
        """Modify existing Rust code"""
        if not self.current_project:
            return "No active project"

        try:
            file_path = self.current_project / file
            if not file_path.exists():
                return f"File {file} not found"

            with open(file_path) as f:
                content = f.read()

            # Handle function modification
            if object_name.endswith("()"):
                func_name = object_name[:-2]
                # Find and replace function definition
                pattern = f"fn {func_name}.*?}}(?=\n|$)"
                updated_content = re.sub(pattern, code.strip(), content, flags=re.DOTALL)
            else:
                # Handle other modifications (structs, constants, etc.)
                pattern = f"{object_name}.*?(?=\n|$)"
                updated_content = re.sub(pattern, code.strip(), content)

            with open(file_path, 'w') as f:
                f.write(updated_content)

            return f"Modified {object_name} in {file}"

        except Exception as e:
            return f"Modification failed: {str(e)}"

    def save_session(self, name: str):
        """Save current session state"""
        session_file = self._session_dir / f"{name}.json"
        state = {
            'output_history': self.output_history,
            'current_project': str(self.current_project) if self.current_project else None
        }

        with open(session_file, 'w') as f:
            json.dump(state, f)

    def load_session(self, name: str):
        """Load saved session state"""
        session_file = self._session_dir / f"{name}.json"
        if session_file.exists():
            with open(session_file) as f:
                state = json.load(f)
                self.output_history = state['output_history']
                self.current_project = Path(state['current_project']) if state['current_project'] else None
__init__(session_dir=None, auto_remove=True)

Initialize the Rust/Cargo interface

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
66
67
68
69
70
71
72
73
74
def __init__(self, session_dir=None, auto_remove=True):
    """Initialize the Rust/Cargo interface.

    :param session_dir: Session-state directory (str or Path);
        defaults to ``~/.cargo_sessions``.
    :param auto_remove: Remove the active project directory on ``reset()``.
    """
    self.auto_remove = auto_remove
    # Coerce to Path so a plain string argument also works with mkdir()
    # and the / operator below.
    self._session_dir = Path(session_dir) if session_dir else Path.home() / '.cargo_sessions'
    self._session_dir.mkdir(exist_ok=True)
    self.vfs = VirtualFileSystem(self._session_dir / 'virtual_fs')
    self.output_history = {}       # execution number -> captured output record
    self._execution_count = 0
    self.current_project = None    # Path of the active cargo project, if any
add_dependency(name, version=None) async

Add a dependency to Cargo.toml

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
async def add_dependency(self, name: str, version: str | None = None) -> str:
    """Add a dependency to Cargo.toml"""
    if not self.current_project:
        return "No active project"

    try:
        cargo_toml = self.current_project / "Cargo.toml"
        if not cargo_toml.exists():
            return "Cargo.toml not found"

        cmd = ['cargo', 'add', name]
        if version:
            cmd.extend(['--vers', version])

        result = subprocess.run(
            cmd,
            cwd=self.current_project,
            capture_output=True,
            text=True,check=True
        )

        return result.stdout if result.returncode == 0 else f"Error: {result.stderr}"

    except Exception as e:
        return f"Failed to add dependency: {str(e)}"
build(release=False) async

Build the project

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
async def build(self, release: bool = False) -> str:
    """Build the active project with ``cargo build``.

    :param release: When True, build with ``--release`` optimizations.
    :return: cargo's stdout on success, otherwise the build error text.
    """
    if not self.current_project:
        return "No active project"

    try:
        cmd = ['cargo', 'build'] + (['--release'] if release else [])
        proc = subprocess.run(
            cmd,
            cwd=self.current_project,
            capture_output=True,
            text=True
        )
        if proc.returncode == 0:
            return proc.stdout
        return f"Build error: {proc.stderr}"
    except Exception as e:
        return f"Build failed: {str(e)}"
load_session(name)

Load saved session state

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
258
259
260
261
262
263
264
265
def load_session(self, name: str):
    """Load saved session state written by ``save_session``.

    :param name: Session name (basename of the JSON file).
    """
    session_file = self._session_dir / f"{name}.json"
    if session_file.exists():
        with open(session_file) as f:
            state = json.load(f)
        # json.dump stringifies int dict keys; convert them back so new
        # executions extend the same numbering instead of colliding.
        self.output_history = {int(k): v for k, v in state['output_history'].items()}
        self.current_project = Path(state['current_project']) if state['current_project'] else None
modify_code(code, object_name, file='src/main.rs') async

Modify existing Rust code

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
async def modify_code(self, code: str, object_name: str, file: str = "src/main.rs") -> str:
    """Modify existing Rust code.

    Replaces the definition of *object_name* in *file* with *code* using a
    regex substitution on the file's text.

    :param code: Replacement source text.
    :param object_name: Target item; a trailing ``()`` marks a function
        (e.g. ``"main()"``), anything else is matched as a plain prefix.
    :param file: Project-relative path of the file to edit.
    :return: A status string; never raises.

    NOTE(review): *object_name* is interpolated into the regex unescaped, so
    names containing regex metacharacters will misbehave — confirm inputs.
    """
    if not self.current_project:
        return "No active project"

    try:
        file_path = self.current_project / file
        if not file_path.exists():
            return f"File {file} not found"

        with open(file_path) as f:
            content = f.read()

        # Handle function modification
        if object_name.endswith("()"):
            func_name = object_name[:-2]
            # Find and replace function definition
            # (non-greedy up to the first '}' ending a line; DOTALL lets '.' span newlines)
            pattern = f"fn {func_name}.*?}}(?=\n|$)"
            updated_content = re.sub(pattern, code.strip(), content, flags=re.DOTALL)
        else:
            # Handle other modifications (structs, constants, etc.)
            pattern = f"{object_name}.*?(?=\n|$)"
            updated_content = re.sub(pattern, code.strip(), content)

        with open(file_path, 'w') as f:
            f.write(updated_content)

        return f"Modified {object_name} in {file}"

    except Exception as e:
        return f"Modification failed: {str(e)}"
reset()

Reset the interface state

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
76
77
78
79
80
81
82
def reset(self):
    """Clear execution history and drop (optionally delete) the active project."""
    project = self.current_project
    if project and self.auto_remove:
        # Best-effort cleanup; ignore files that are already gone or locked.
        shutil.rmtree(project, ignore_errors=True)
    self.output_history.clear()
    self._execution_count = 0
    self.current_project = None
run_code(code) async

Run Rust code

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
async def run_code(self, code: str) -> str:
    """Write *code* to src/main.rs, compile it, run it, and record the output.

    :param code: Complete Rust source for ``main.rs``.
    :return: The program's stdout on success, otherwise an error description.
    """
    if not self.current_project:
        return "No active project"

    try:
        # Replace the crate's entry point with the supplied source.
        main_rs = self.current_project / "src" / "main.rs"
        with open(main_rs, 'w') as f:
            f.write(code)

        compile_proc = subprocess.run(
            ['cargo', 'build'],
            cwd=self.current_project,
            capture_output=True,
            text=True
        )
        if compile_proc.returncode != 0:
            return f"Compilation error: {compile_proc.stderr}"

        exec_proc = subprocess.run(
            ['cargo', 'run'],
            cwd=self.current_project,
            capture_output=True,
            text=True
        )

        # Keep a per-execution record for later inspection / session saving.
        self._execution_count += 1
        self.output_history[self._execution_count] = {
            'code': code,
            'stdout': exec_proc.stdout,
            'stderr': exec_proc.stderr,
            'result': exec_proc.returncode == 0,
        }

        if exec_proc.returncode == 0:
            return exec_proc.stdout
        return f"Runtime error: {exec_proc.stderr}"

    except Exception as e:
        return f"Execution failed: {str(e)}"
save_session(name)

Save current session state

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
247
248
249
250
251
252
253
254
255
256
def save_session(self, name: str):
    """Persist the execution history and active project path as ``<name>.json``."""
    state = {
        'output_history': self.output_history,
        'current_project': str(self.current_project) if self.current_project else None,
    }
    target = self._session_dir / f"{name}.json"
    with open(target, 'w') as f:
        json.dump(state, f)
setup_project(name) async

Set up a new Cargo project

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
async def setup_project(self, name: str) -> str:
    """Create a fresh Cargo project under the virtual FS and activate it.

    :param name: Project (directory) name.
    :return: A status string; never raises.
    """
    try:
        project_path = self.vfs.base_dir / name
        if project_path.exists():
            shutil.rmtree(project_path)

        # No check=True: a non-zero exit must reach the returncode branch
        # below so cargo's stderr is reported instead of being swallowed
        # inside a CalledProcessError.
        result = subprocess.run(
            ['cargo', 'new', str(project_path)],
            capture_output=True,
            text=True
        )

        if result.returncode != 0:
            return f"Error creating project: {result.stderr}"

        self.current_project = project_path
        return f"Created new project at {project_path}"

    except Exception as e:
        return f"Failed to create project: {str(e)}"
test() async

Run project tests

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
async def test(self) -> str:
    """Run the project's test suite with ``cargo test``.

    :return: cargo's stdout on success, otherwise the test error text.
    """
    if not self.current_project:
        return "No active project"

    try:
        # No check=True: a failing test run must reach the returncode branch
        # below so stderr is reported instead of raising CalledProcessError.
        result = subprocess.run(
            ['cargo', 'test'],
            cwd=self.current_project,
            capture_output=True,
            text=True
        )

        return result.stdout if result.returncode == 0 else f"Test error: {result.stderr}"

    except Exception as e:
        return f"Tests failed: {str(e)}"
MockIPython
Source code in toolboxv2/mods/isaa/CodingAgent/live.py
 547
 548
 549
 550
 551
 552
 553
 554
 555
 556
 557
 558
 559
 560
 561
 562
 563
 564
 565
 566
 567
 568
 569
 570
 571
 572
 573
 574
 575
 576
 577
 578
 579
 580
 581
 582
 583
 584
 585
 586
 587
 588
 589
 590
 591
 592
 593
 594
 595
 596
 597
 598
 599
 600
 601
 602
 603
 604
 605
 606
 607
 608
 609
 610
 611
 612
 613
 614
 615
 616
 617
 618
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
class MockIPython:
    """Minimal IPython-like interpreter with a virtual filesystem and venv.

    Executes code cells (sync or async), keeps a per-cell output history,
    and can persist/restore sessions to disk. Relies on ``nest_asyncio``
    so cells may call ``asyncio.run`` while already inside an event loop.
    Not thread-safe.
    """

    def __init__(self, _session_dir=None, auto_remove=True):
        # auto_remove: delete temp files on run and wipe the VFS on reset()
        self.auto_remove = auto_remove
        self.output_history = {}
        self._execution_count = 0
        self._session_dir = _session_dir or Path(get_app().appdata) / '.pipeline_sessions'
        # parents=True: a missing appdata tree must not abort startup
        self._session_dir.mkdir(parents=True, exist_ok=True)
        self.vfs = VirtualFileSystem(self._session_dir / 'virtual_fs')
        self._venv_path = self._session_dir / 'venv'
        self.user_ns: dict[str, Any] = {}
        # Allow nested event loops (cells may call asyncio.run while awaited)
        nest_asyncio.apply()
        # Set up virtual environment if it doesn't exist
        with Spinner("Starting virtual environment"):
            self._setup_venv()
        self.reset()

    def _setup_venv(self):
        """Create the private virtual environment on first use."""
        if not self._venv_path.exists():
            try:
                subprocess.run([sys.executable, "-m", "venv", str(self._venv_path)], check=True)
            except subprocess.CalledProcessError as e:
                # Chain the original error so the root cause stays visible
                raise RuntimeError(f"Failed to create virtual environment: {str(e)}") from e

    def _virtual_open(self, filepath, mode='r', *args, **kwargs):
        """Open *filepath* via the VFS, mirroring text reads into it.

        Writes go to the real filesystem under the VFS base directory;
        successfully decoded reads are cached in ``vfs.virtual_files`` so
        sessions can be exported.
        """
        abs_path = self.vfs._resolve_path(filepath)

        if 'w' in mode or 'a' in mode:
            # Ensure parent directory exists before the real open
            abs_path.parent.mkdir(parents=True, exist_ok=True)

        # Use the actual filesystem but track content in the virtual fs
        real_file = open(abs_path, mode, *args, **kwargs)

        if 'r' in mode:
            # Track file content in the virtual filesystem when reading
            rel_path = str(abs_path.relative_to(self.vfs.base_dir))
            if rel_path not in self.vfs.virtual_files:
                try:
                    self.vfs.virtual_files[rel_path] = real_file.read()
                    real_file.seek(0)
                except UnicodeDecodeError:
                    # Binary files are not mirrored into the text-only VFS map
                    pass

        return real_file

    def reset(self):
        """Reset namespace and history; wipe the VFS if auto_remove is set."""
        self.user_ns = {
            '__name__': '__main__',
            '__builtins__': __builtins__,
            'toolboxv2': toolboxv2,
            '__file__': None,
            '__path__': [str(self.vfs.current_dir)],
            'auto_install': auto_install,
            'modify_code': self.modify_code,
        }
        self.output_history.clear()
        self._execution_count = 0
        if self.auto_remove:
            # Remove on-disk artifacts of the previous session
            shutil.rmtree(self.vfs.base_dir, ignore_errors=True)

    def get_namespace(self) -> dict[str, Any]:
        """Return a shallow copy of the current user namespace."""
        return self.user_ns.copy()

    def update_namespace(self, variables: dict[str, Any]):
        """Merge *variables* into the user namespace."""
        self.user_ns.update(variables)

    @staticmethod
    def _parse_code(code: str) -> tuple[Any, Any | None, bool, bool]:
        """Compile *code* and classify its async requirements.

        Returns ``(exec_code, eval_code, has_async, has_top_level_await)``.
        A trailing expression statement is compiled separately as
        ``eval_code`` so the cell can yield a value. Top-level ``await``
        is supported by wrapping the whole cell in ``async def __wrapper``.

        Raises:
            SyntaxError: with a caret-annotated message for invalid input.
        """
        try:
            tree = ast.parse(code)
            # Add parent references
            ParentNodeTransformer().visit(tree)

            # Detect async features
            detector = AsyncCodeDetector()
            detector.visit(tree)

            if detector.has_top_level_await:
                # Wrap the cell in an async function; run_cell awaits __wrapper()
                wrapped_code = "async def __wrapper():\n"
                wrapped_code += "    global result\n"  # Allow writing to global scope
                wrapped_code += "    result = None\n"
                wrapped_code += "    try:\n"
                # Indent the original code under the try block
                wrapped_code += "\n".join(f"        {line}" for line in code.splitlines())
                # Re-raise after printing a full traceback inside the cell
                wrapped_code += "\n    except Exception as e:\n"
                wrapped_code += "        import traceback\n"
                wrapped_code += "        print(traceback.format_exc())\n"
                wrapped_code += "        raise e\n"
                if isinstance(tree.body[-1], ast.Expr):
                    wrapped_code += "\n    return result"

                # Parse and compile wrapped code
                wrapped_tree = ast.parse(wrapped_code)
                return (
                    compile(wrapped_tree, '<exec>', 'exec'),
                    None,
                    True,
                    True
                )

            # Regular code: split off a trailing expression for evaluation
            if isinstance(tree.body[-1], ast.Expr):
                exec_code = ast.Module(
                    body=tree.body[:-1],
                    type_ignores=[]
                )
                eval_code = ast.Expression(
                    body=tree.body[-1].value
                )
                return (
                    compile(exec_code, '<exec>', 'exec'),
                    compile(eval_code, '<eval>', 'eval'),
                    detector.has_async,
                    False
                )

            return (
                compile(tree, '<exec>', 'exec'),
                None,
                detector.has_async,
                False
            )

        except SyntaxError as e:
            # Build a caret-annotated message pointing at the offending column
            lines = code.splitlines()
            if e.lineno and e.lineno <= len(lines):
                line = lines[e.lineno - 1]
                arrow = ' ' * (e.offset - 1) + '^' if e.offset else ''
                error_msg = (
                    f"Syntax error at line {e.lineno}:\n"
                    f"{line}\n"
                    f"{arrow}\n"
                    f"{e.msg}"
                )
            else:
                error_msg = str(e)

            error_msg += traceback.format_exc()

            raise SyntaxError(error_msg) from e

    async def run_cell(self, code: str, live_output: bool = True) -> Any:
        """Execute one cell (sync or async) and return its result plus output.

        Args:
            code: Source of the cell to execute.
            live_output: Tee stdout/stderr to the real terminal while also
                capturing them; if False, capture silently.

        Returns:
            The cell's value (last expression) with captured stdout/stderr
            appended, or an error string if setup itself failed.
        """
        result = None
        error = None
        tb = None
        original_dir = os.getcwd()

        if live_output:
            stdout_buffer = io.StringIO()
            stderr_buffer = io.StringIO()
            stdout = TeeStream(sys.__stdout__, stdout_buffer)
            stderr = TeeStream(sys.__stderr__, stderr_buffer)
        else:
            stdout = io.StringIO()
            stderr = io.StringIO()

        try:
            # Check if a file is already specified
            original_file = self.user_ns.get('__file__')
            if original_file is None:
                # Create temp file if no file specified
                temp_file = self.vfs.write_file(
                    f'src/temp/_temp_{self._execution_count}.py',
                    code
                )
                self.user_ns['__file__'] = str(temp_file)
            else:
                # Use existing file and write the cell's code into it
                temp_file = Path(original_file)
                self.vfs.write_file(temp_file, code)

            self.user_ns['__builtins__'] = __builtins__
            with VirtualEnvContext(self._venv_path) as python_exec:
                try:
                    exec_code, eval_code, is_async, has_top_level_await = self._parse_code(
                        code.encode('utf-8', errors='replace').decode('utf-8')
                    )
                    if exec_code is None:
                        return "No executable code"
                    os.makedirs(str(temp_file.parent.absolute()), exist_ok=True)
                    # Run relative to the cell's file, like a script would
                    os.chdir(str(temp_file.parent.absolute()))
                    self.user_ns['PYTHON_EXEC'] = python_exec

                    with redirect_stdout(stdout), redirect_stderr(stderr):
                        if has_top_level_await:
                            try:
                                # Execute wrapped code and await it
                                exec(exec_code, self.user_ns)
                                result = self.user_ns['__wrapper']()
                                if asyncio.iscoroutine(result):
                                    result = await result
                            finally:
                                self.user_ns.pop('__wrapper', None)
                        elif is_async:
                            # Execute async code; await the value if needed
                            exec(exec_code, self.user_ns)
                            if eval_code:
                                result = eval(eval_code, self.user_ns)
                                if asyncio.iscoroutine(result):
                                    result = await result
                        else:
                            # Execute sync code
                            exec(exec_code, self.user_ns)
                            if eval_code:
                                result = eval(eval_code, self.user_ns)

                        if result is not None:
                            self.user_ns['_'] = result
                except KeyboardInterrupt:
                    print("Execution stopped manually!")

                except Exception as e:
                    error = str(e)
                    tb = traceback.format_exc()
                    if live_output:
                        sys.__stderr__.write(f"{error}\n{tb}")
                    stderr.write(f"{error}\n{tb}")

                finally:
                    os.chdir(original_dir)
                    self._execution_count += 1
                    if live_output:
                        stdout_value = stdout_buffer.getvalue()
                        stderr_value = stderr_buffer.getvalue()
                    else:
                        stdout_value = stdout.getvalue()
                        stderr_value = stderr.getvalue()

                    output = {
                        'code': code,
                        'stdout': stdout_value,
                        'stderr': stderr_value,
                        # 'is None' keeps falsy-but-real results (0, False, "")
                        'result': result if result is not None else "stdout"
                    }
                    self.output_history[self._execution_count] = output

        except Exception as e:
            error_msg = f"Error executing code: {str(e)}\n{traceback.format_exc()}"
            if live_output:
                sys.__stderr__.write(error_msg)
            return error_msg

        if result is None:
            result = ""
        if output['stdout']:
            result = f"{result}\nstdout:{output['stdout']}"
        if output['stderr']:
            result = f"{result}\nstderr:{output['stderr']}"

        if self.auto_remove and original_file is None:
            # Only remove temp files, not user-specified files
            self.vfs.delete_file(temp_file)

        return result

    async def modify_code(self, code: str = None, object_name: str = None, file: str = None) -> str:
        '''
        Modify existing code in memory (user namespace) and optionally in the corresponding file.

        This method updates variables, functions, or methods in the current Python session and can
        also update the corresponding source file if specified.

        Function and method bodies are passed as triple-quoted source strings
        (``'"""def f(): ..."""'``); plain values are evaluated as expressions.

        Args:
            code: New value or implementation for the object
            object_name: Name of the object to modify (variable, function, or method)
            file: Path to the file to update (if None, only updates in memory)

        Returns:
            String describing the modification result

        Examples:

            # 1. Update a variable in memory
            await ipython.modify_code(code="5", object_name="x")

            # 2. Change a method implementation
            await ipython.modify_code(
                code='"""def sound(self):\n        return "Woof""""',
                object_name="Dog.sound"
            )

            # 3. Modify a function
            await ipython.modify_code(
                code='"""def calculate_age():\n    return 25"""',
                object_name="calculate_age"
            )

            # 4. Update variable in memory and file
            await ipython.modify_code(
                code="100",
                object_name="MAX_SIZE",
                file="config.py"
            )
        '''
        try:
            if not object_name:
                raise ValueError("Object name must be specified")
            if code is None:
                raise ValueError("New code or value must be provided")

            # Process object name (handle methods with parentheses)
            clean_object_name = object_name.replace("()", "")

            # Step 1: Update in memory (user namespace)
            result_message = []

            # Handle different types of objects
            if "." in clean_object_name:
                # For methods or class attributes
                parts = clean_object_name.split(".")
                base_obj_name = parts[0]
                attr_name = parts[1]

                if base_obj_name not in self.user_ns:
                    raise ValueError(f"Object '{base_obj_name}' not found in namespace")

                base_obj = self.user_ns[base_obj_name]

                # Method implementations arrive as triple-quoted source strings
                if code.startswith('"""') and code.endswith('"""'):
                    # removeprefix/removesuffix: strip('"""') would also eat
                    # quote characters belonging to the code's own literals
                    method_code = code.removeprefix('"""').removesuffix('"""')

                    # Parse to validate and obtain the defined name
                    method_ast = ast.parse(method_code).body[0]

                    # Build the function object; user_ns supplies its globals
                    method_locals = {}
                    exec(method_code, self.user_ns, method_locals)
                    new_method = method_locals[method_ast.name]

                    # Set the method on the class so every instance sees it
                    setattr(base_obj.__class__, attr_name, new_method)
                    result_message.append(f"Updated method '{clean_object_name}' in memory")
                else:
                    # For simple attributes
                    setattr(base_obj, attr_name, eval(code, self.user_ns))
                    result_message.append(f"Updated attribute '{clean_object_name}' in memory")
            else:
                # For variables and functions
                if code.startswith('"""') and code.endswith('"""'):
                    # Handle function definitions
                    func_code = code.removeprefix('"""').removesuffix('"""')
                    func_ast = ast.parse(func_code).body[0]
                    func_name = func_ast.name

                    # Create a new function object; user_ns supplies globals
                    func_locals = {}
                    exec(func_code, self.user_ns, func_locals)
                    self.user_ns[clean_object_name] = func_locals[func_name]
                    result_message.append(f"Updated function '{clean_object_name}' in memory")
                else:
                    # Simple variable assignment
                    self.user_ns[clean_object_name] = eval(code, self.user_ns)
                    result_message.append(f"Updated variable '{clean_object_name}' in memory")

            # Step 2: Update in file if specified
            if file is not None:
                file_path = self.vfs._resolve_path(file)

                if not file_path.exists():
                    # New file: run the code as a cell targeting that file
                    self.user_ns['__file__'] = str(file_path)
                    return await self.run_cell(code)

                # Read original content
                original_content = self.vfs.read_file(file_path)
                updated_content = original_content

                # Handle different object types for file updates
                if "." in clean_object_name:
                    # For methods
                    parts = clean_object_name.split(".")
                    class_name = parts[0]
                    method_name = parts[1]

                    if code.startswith('"""') and code.endswith('"""'):
                        method_code = code.removeprefix('"""').removesuffix('"""')

                        # Use ast to parse the file and find the method to replace
                        file_ast = ast.parse(original_content)
                        for node in ast.walk(file_ast):
                            if isinstance(node, ast.ClassDef) and node.name == class_name:
                                for method in node.body:
                                    if isinstance(method, ast.FunctionDef) and method.name == method_name:
                                        # Find the method in the source code
                                        method_pattern = fr"def {method_name}.*?:(.*?)(?=\n    \w|\n\w|\Z)"
                                        method_match = re.search(method_pattern, original_content, re.DOTALL)

                                        if method_match:
                                            indentation = re.match(r"^(\s*)", method_match.group(0)).group(1)
                                            method_indented = textwrap.indent(method_code, indentation)
                                            updated_content = original_content.replace(
                                                method_match.group(0),
                                                method_indented
                                            )
                                            self.vfs.write_file(file_path, updated_content)
                                            result_message.append(
                                                f"Updated method '{clean_object_name}' in file '{file}'")
                else:
                    # For variables and functions
                    if code.startswith('"""') and code.endswith('"""'):
                        # Handle function updates
                        func_code = code.removeprefix('"""').removesuffix('"""')
                        func_pattern = fr"def {clean_object_name}.*?:(.*?)(?=\n\w|\Z)"
                        func_match = re.search(func_pattern, original_content, re.DOTALL)

                        if func_match:
                            indentation = re.match(r"^(\s*)", func_match.group(0)).group(1)
                            func_indented = textwrap.indent(func_code, indentation)
                            updated_content = original_content.replace(
                                func_match.group(0),
                                func_indented
                            )
                            self.vfs.write_file(file_path, updated_content)
                            result_message.append(f"Updated function '{clean_object_name}' in file '{file}'")
                    else:
                        # Handle variable updates
                        var_pattern = fr"{clean_object_name}\s*=.*"
                        var_replacement = f"{clean_object_name} = {code}"
                        updated_content = re.sub(var_pattern, var_replacement, original_content)

                        if updated_content != original_content:
                            self.vfs.write_file(file_path, updated_content)
                            result_message.append(f"Updated variable '{clean_object_name}' in file '{file}'")
                        else:
                            result_message.append(f"Could not find variable '{clean_object_name}' in file '{file}'")

            return "\n".join(result_message)

        except Exception as e:
            return f"Error during code modification: {str(e)}\n{traceback.format_exc()}"


    def save_session(self, name: str):
        """Pickle the namespace and output history; export the VFS as JSON.

        Values that fail pickling (or contain surrogate characters that
        cannot round-trip through UTF-8) are replaced by a marker string.
        """
        session_file = self._session_dir / f"{name}.pkl"
        user_ns = self.user_ns.copy()
        output_history = self.output_history.copy()

        # The encode/decode round-trip rejects surrogate-laden strings,
        # and pickle.dumps probes serializability before the real dump.
        for key, value in user_ns.items():
            try:
                if isinstance(value, str):
                    value = value.encode('utf-8').decode('utf-8')
                pickle.dumps(value)
            except Exception:
                user_ns[key] = f"not serializable: {str(value)}"

        for key, value in output_history.items():
            try:
                if isinstance(value, dict):
                    for k, v in value.items():
                        if isinstance(v, str):
                            value[k] = v.encode('utf-8').decode('utf-8')
                pickle.dumps(value)
            except Exception:
                output_history[key] = f"not serializable: {str(value)}"


        session_data = {
            'user_ns': user_ns,
            'output_history': output_history,

        }

        with open(session_file, 'wb') as f:
            pickle.dump(session_data, f)

        # Save VFS state with UTF-8 encoding
        vfs_state_file = self._session_dir / f"{name}_vfs.json"
        with open(vfs_state_file, 'w', encoding='utf-8') as f:
            json.dump(self.vfs.virtual_files, f, ensure_ascii=False)

    def load_session(self, name: str):
        """Restore output history and VFS state saved by save_session.

        NOTE: the pickled user namespace is intentionally NOT restored
        (see the commented line); only history and files come back.
        """
        session_file = self._session_dir / f"{name}.pkl"
        if session_file.exists():
            with open(session_file, 'rb') as f:
                session_data = pickle.load(f)
                # self.user_ns.update(session_data['user_ns'])
                self.output_history.update(session_data['output_history'])

        # Load VFS state with UTF-8 encoding
        vfs_state_file = self._session_dir / f"{name}_vfs.json"
        if vfs_state_file.exists():
            with open(vfs_state_file, encoding='utf-8') as f:
                self.vfs.virtual_files = json.load(f)

    def __str__(self):
        """Render the session history as In[n]/Out[n] transcript lines."""
        output = []
        for count, data in self.output_history.items():
            output.append(f"In [{count}]: {data['code']}")
            if data['stdout']:
                output.append(data['stdout'])
            if data['stderr']:
                output.append(f"Error: {data['stderr']}")
            if data['result'] is not None:
                output.append(f"Out[{count}]: {data['result']}")
        return "\n".join(output)
__str__()

String representation of current session

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
def __str__(self):
    """String representation of current session"""
    output = []
    for count, data in self.output_history.items():
        output.append(f"In [{count}]: {data['code']}")
        if data['stdout']:
            output.append(data['stdout'])
        if data['stderr']:
            output.append(f"Error: {data['stderr']}")
        if data['result'] is not None:
            output.append(f"Out[{count}]: {data['result']}")
    return "\n".join(output)
get_namespace()

Get current namespace

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
611
612
613
def get_namespace(self) -> dict[str, Any]:
    """Get current namespace"""
    return self.user_ns.copy()
load_session(name)

Load session with UTF-8 encoding

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
def load_session(self, name: str):
    """Load session with UTF-8 encoding"""
    session_file = self._session_dir / f"{name}.pkl"
    if session_file.exists():
        with open(session_file, 'rb') as f:
            session_data = pickle.load(f)
            # self.user_ns.update(session_data['user_ns'])
            self.output_history.update(session_data['output_history'])

    # Load VFS state with UTF-8 encoding
    vfs_state_file = self._session_dir / f"{name}_vfs.json"
    if vfs_state_file.exists():
        with open(vfs_state_file, encoding='utf-8') as f:
            self.vfs.virtual_files = json.load(f)
modify_code(code=None, object_name=None, file=None) async
Modify existing code in memory (user namespace) and optionally in the corresponding file.

This method updates variables, functions, or methods in the current Python session and can
also update the corresponding source file if specified.

Args:
    code: New value or implementation for the object
    object_name: Name of the object to modify (variable, function, or method)
    file: Path to the file to update (if None, only updates in memory)

Returns:
    String describing the modification result

Examples:

# 1. Update a variable in memory
await ipython.modify_code(code="5", object_name="x")
2. Change a method implementation

await ipython.modify_code( code='"""def sound(self): return "Woof""""', object_name="Dog.sound" )

3. Modify a function

await ipython.modify_code( code='"""def calculate_age(): return 25"""', object_name="calculate_age" )

4. Update variable in memory and file

await ipython.modify_code( code="100", object_name="MAX_SIZE", file="config.py" )

5. Modifying an attribute in `__init__`

await ipython.modify_code( code='"""def __init__(self): self.name = "Buddy""""', object_name="Dog.__init__" )

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
async def modify_code(self, code: str = None, object_name: str = None, file: str = None) -> str:
    '''
    Modify existing code in memory (user namespace) and optionally in the corresponding file.

    This method updates variables, functions, or methods in the current Python session and can
    also update the corresponding source file if specified.

    Args:
        code: New value or implementation for the object
        object_name: Name of the object to modify (variable, function, or method)
        file: Path to the file to update (if None, only updates in memory)

    Returns:
        String describing the modification result

    Examples:

    # 1. Update a variable in memory
    await ipython.modify_code(code="5", object_name="x")

# 2. Change a method implementation
await ipython.modify_code(
    code='"""def sound(self):\n        return "Woof""""',
    object_name="Dog.sound"
)

# 3. Modify a function
await ipython.modify_code(
    code='"""def calculate_age():\n    return 25"""',
    object_name="calculate_age"
)

# 4. Update variable in memory and file
await ipython.modify_code(
    code="100",
    object_name="MAX_SIZE",
    file="config.py"
)

# 5. Modifying an attribute in __init__
await ipython.modify_code(
    code='"""def __init__(self):\n        self.name = "Buddy""""',
    object_name="Dog.__init__"
)
    '''
    try:
        if not object_name:
            raise ValueError("Object name must be specified")
        if code is None:
            raise ValueError("New code or value must be provided")

        # Process object name (handle methods with parentheses)
        clean_object_name = object_name.replace("()", "")

        # Step 1: Update in memory (user namespace)
        result_message = []

        # Handle different types of objects
        if "." in clean_object_name:
            # For methods or class attributes
            parts = clean_object_name.split(".")
            base_obj_name = parts[0]
            attr_name = parts[1]

            if base_obj_name not in self.user_ns:
                raise ValueError(f"Object '{base_obj_name}' not found in namespace")

            base_obj = self.user_ns[base_obj_name]

            # Handle method definitions which are passed as docstrings
            if code.split('\n'):
                method_code = code

                # Parse the method code to extract its body
                method_ast = ast.parse(method_code).body[0]
                method_name = method_ast.name

                # Create a new function object from the code
                method_locals = {}
                exec(
                    f"def _temp_func{signature(getattr(base_obj.__class__, attr_name, None))}: {method_ast.body[0].value.s}",
                    globals(), method_locals)
                new_method = method_locals['_temp_func']

                # Set the method on the class
                setattr(base_obj.__class__, attr_name, new_method)
                result_message.append(f"Updated method '{clean_object_name}' in memory")
            else:
                # For simple attributes
                setattr(base_obj, attr_name, eval(code, self.user_ns))
                result_message.append(f"Updated attribute '{clean_object_name}' in memory")
        else:
            # For variables and functions
            if code.startswith('"""') and code.endswith('"""'):
                # Handle function definitions
                func_code = code.strip('"""')
                func_ast = ast.parse(func_code).body[0]
                func_name = func_ast.name

                # Create a new function object from the code
                func_locals = {}
                exec(f"{func_code}", globals(), func_locals)
                self.user_ns[clean_object_name] = func_locals[func_name]
                result_message.append(f"Updated function '{clean_object_name}' in memory")
            else:
                # Simple variable assignment
                self.user_ns[clean_object_name] = eval(code, self.user_ns)
                result_message.append(f"Updated variable '{clean_object_name}' in memory")

        # Step 2: Update in file if specified
        if file is not None:
            file_path = self.vfs._resolve_path(file)

            if not file_path.exists():
                self.user_ns['__file__'] = str(file_path)
                return await self.run_cell(code)

            # Read original content
            original_content = self.vfs.read_file(file_path)
            updated_content = original_content

            # Handle different object types for file updates
            if "." in clean_object_name:
                # For methods
                parts = clean_object_name.split(".")
                class_name = parts[0]
                method_name = parts[1]

                if code.startswith('"""') and code.endswith('"""'):
                    method_code = code.strip('"""')

                    # Use ast to parse the file and find the method to replace
                    file_ast = ast.parse(original_content)
                    for node in ast.walk(file_ast):
                        if isinstance(node, ast.ClassDef) and node.name == class_name:
                            for method in node.body:
                                if isinstance(method, ast.FunctionDef) and method.name == method_name:
                                    # Find the method in the source code
                                    method_pattern = fr"def {method_name}.*?:(.*?)(?=\n    \w|\n\w|\Z)"
                                    method_match = re.search(method_pattern, original_content, re.DOTALL)

                                    if method_match:
                                        indentation = re.match(r"^(\s*)", method_match.group(0)).group(1)
                                        method_indented = textwrap.indent(method_code, indentation)
                                        updated_content = original_content.replace(
                                            method_match.group(0),
                                            method_indented
                                        )
                                        self.vfs.write_file(file_path, updated_content)
                                        result_message.append(
                                            f"Updated method '{clean_object_name}' in file '{file}'")
            else:
                # For variables and functions
                if code.startswith('"""') and code.endswith('"""'):
                    # Handle function updates
                    func_code = code.strip('"""')
                    func_pattern = fr"def {clean_object_name}.*?:(.*?)(?=\n\w|\Z)"
                    func_match = re.search(func_pattern, original_content, re.DOTALL)

                    if func_match:
                        indentation = re.match(r"^(\s*)", func_match.group(0)).group(1)
                        func_indented = textwrap.indent(func_code, indentation)
                        updated_content = original_content.replace(
                            func_match.group(0),
                            func_indented
                        )
                        self.vfs.write_file(file_path, updated_content)
                        result_message.append(f"Updated function '{clean_object_name}' in file '{file}'")
                else:
                    # Handle variable updates
                    var_pattern = fr"{clean_object_name}\s*=.*"
                    var_replacement = f"{clean_object_name} = {code}"
                    updated_content = re.sub(var_pattern, var_replacement, original_content)

                    if updated_content != original_content:
                        self.vfs.write_file(file_path, updated_content)
                        result_message.append(f"Updated variable '{clean_object_name}' in file '{file}'")
                    else:
                        result_message.append(f"Could not find variable '{clean_object_name}' in file '{file}'")

        return "\n".join(result_message)

    except Exception as e:
        return f"Error during code modification: {str(e)}\n{traceback.format_exc()}"
reset()

Reset the interpreter state

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
def reset(self):
    """Reset the interpreter state: rebuild the execution namespace,
    drop all recorded output, and (optionally) wipe the VFS sandbox."""
    self.user_ns = dict([
        ('__name__', '__main__'),
        ('__builtins__', __builtins__),
        ('toolboxv2', toolboxv2),
        ('__file__', None),
        ('__path__', [str(self.vfs.current_dir)]),
        ('auto_install', auto_install),
        ('modify_code', self.modify_code),
    ])
    self.output_history.clear()
    self._execution_count = 0
    if self.auto_remove:
        # Best-effort removal of the sandbox directory; errors are ignored.
        shutil.rmtree(self.vfs.base_dir, ignore_errors=True)
run_cell(code, live_output=True) async

Async version of run_cell that handles both sync and async code

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
async def run_cell(self, code: str, live_output: bool = True) -> Any:
    """Async version of run_cell that handles both sync and async code.

    Writes *code* into the VFS (a fresh temp file unless ``__file__`` is
    already set in the namespace), executes it inside the virtual
    environment with stdout/stderr captured, and records the outcome in
    ``self.output_history``.

    Args:
        code: Source code of the cell to execute.
        live_output: When True, output is mirrored to the real console
            while still being captured; when False it is only buffered.

    Returns:
        The last expression's value combined with captured stdout/stderr
        text, or an error string if execution failed at the outer level.
    """
    result = None
    error = None
    tb = None
    original_dir = os.getcwd()  # restored in the inner finally after chdir below

    if live_output:
        # Tee streams: echo to the real console AND capture into buffers.
        stdout_buffer = io.StringIO()
        stderr_buffer = io.StringIO()
        stdout = TeeStream(sys.__stdout__, stdout_buffer)
        stderr = TeeStream(sys.__stderr__, stderr_buffer)
    else:
        stdout = io.StringIO()
        stderr = io.StringIO()

    try:
        # Check if a file is already specified
        original_file = self.user_ns.get('__file__')
        if original_file is None:
            # Create temp file if no file specified
            temp_file = self.vfs.write_file(
                f'src/temp/_temp_{self._execution_count}.py',
                code
            )
            # work_ns = self.user_ns.copy()
            self.user_ns['__file__'] = str(temp_file)
        else:
            # Use existing file
            temp_file = Path(original_file)
            # Write code to the existing file
            self.vfs.write_file(temp_file, code)
            #work_ns = self.user_ns.copy()

        self.user_ns['__builtins__'] = __builtins__
        with VirtualEnvContext(self._venv_path) as python_exec:
            try:
                # _parse_code splits the cell into exec/eval parts and flags
                # async constructs; the encode/decode round-trip scrubs any
                # bytes that cannot be represented in UTF-8.
                exec_code, eval_code, is_async, has_top_level_await = self._parse_code(
                    code.encode('utf-8', errors='replace').decode('utf-8')
                )
                if exec_code is None:
                    return "No executable code"
                os.makedirs(str(temp_file.parent.absolute()), exist_ok=True)
                os.chdir(str(temp_file.parent.absolute()))
                self.user_ns['PYTHON_EXEC'] = python_exec

                with redirect_stdout(stdout), redirect_stderr(stderr):
                    if has_top_level_await:
                        try:
                            # Execute wrapped code and await it
                            exec(exec_code, self.user_ns)
                            result = self.user_ns['__wrapper']()
                            if asyncio.iscoroutine(result):
                                result = await result
                        finally:
                            # Always drop the synthetic wrapper from the namespace.
                            self.user_ns.pop('__wrapper', None)
                    elif is_async:
                        # Execute async code
                        exec(exec_code, self.user_ns)
                        if eval_code:
                            result = eval(eval_code, self.user_ns)
                            if asyncio.iscoroutine(result):
                                result = await result
                    else:
                        # Execute sync code
                        exec(exec_code, self.user_ns)
                        if eval_code:
                            result = eval(eval_code, self.user_ns)

                    if result is not None:
                        # IPython-style "last result" binding.
                        self.user_ns['_'] = result
            except KeyboardInterrupt:
                print("Stop execution manuel!")

            except Exception as e:
                error = str(e)
                tb = traceback.format_exc()
                if live_output:
                    sys.__stderr__.write(f"{error}\n{tb}")
                stderr.write(f"{error}\n{tb}")

            finally:
                os.chdir(original_dir)
                self._execution_count += 1
                # self.user_ns = work_ns.copy()
                if live_output:
                    stdout_value = stdout_buffer.getvalue()
                    stderr_value = stderr_buffer.getvalue()
                else:
                    stdout_value = stdout.getvalue()
                    stderr_value = stderr.getvalue()

                # NOTE(review): falsy results (0, '', False) are replaced by
                # the "stdout" placeholder below — confirm this is intended.
                output = {
                    'code': code,
                    'stdout': stdout_value,
                    'stderr': stderr_value,
                    'result': result if result else "stdout"
                }
                self.output_history[self._execution_count] = output

    except Exception as e:
        error_msg = f"Error executing code: {str(e)}\n{traceback.format_exc()}"
        if live_output:
            sys.__stderr__.write(error_msg)
        return error_msg

    if not result:
        result = ""
    if output['stdout']:
        result = f"{result}\nstdout:{output['stdout']}"
    if output['stderr']:
        result = f"{result}\nstderr:{output['stderr']}"

    if self.auto_remove and original_file is None:
        # Only remove temp files, not user-specified files
        self.vfs.delete_file(temp_file)

    return result
save_session(name)

Save session with UTF-8 encoding

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
def save_session(self, name: str):
    """Persist the session (namespace + output history) with UTF-8 safety.

    Values that cannot be pickled (or whose strings fail UTF-8 round-trip)
    are replaced by a readable placeholder. The VFS file table is written
    alongside as JSON.
    """
    target = self._session_dir / f"{name}.pkl"
    ns_snapshot = dict(self.user_ns)
    history_snapshot = dict(self.output_history)

    # Sanitize namespace entries: verify UTF-8 strings and picklability.
    for key in list(ns_snapshot):
        item = ns_snapshot[key]
        try:
            if isinstance(item, str):
                item = item.encode('utf-8').decode('utf-8')
            pickle.dumps(item)
        except Exception:
            ns_snapshot[key] = f"not serializable: {str(item)}"

    # Sanitize output-history entries the same way.
    for key in list(history_snapshot):
        entry = history_snapshot[key]
        try:
            if isinstance(entry, dict):
                for k, v in entry.items():
                    if isinstance(v, str):
                        entry[k] = v.encode('utf-8').decode('utf-8')
            pickle.dumps(entry)
        except Exception:
            history_snapshot[key] = f"not serializable: {str(entry)}"

    with open(target, 'wb') as f:
        pickle.dump({
            'user_ns': ns_snapshot,
            'output_history': history_snapshot,
        }, f)

    # Save VFS state with UTF-8 encoding
    vfs_state_file = self._session_dir / f"{name}_vfs.json"
    with open(vfs_state_file, 'w', encoding='utf-8') as f:
        json.dump(self.vfs.virtual_files, f, ensure_ascii=False)
update_namespace(variables)

Update namespace with new variables

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
615
616
617
def update_namespace(self, variables: dict[str, Any]):
    """Merge the given variables into the interpreter namespace."""
    for var_name, var_value in variables.items():
        self.user_ns[var_name] = var_value
ParentNodeTransformer

Bases: NodeTransformer

Add parent references to AST nodes

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
462
463
464
465
466
467
class ParentNodeTransformer(ast.NodeTransformer):
    """Annotate every AST node's direct children with a ``parent`` back-reference."""

    def visit(self, node):
        # Tag each immediate child, then let the base class recurse.
        for kid in ast.iter_child_nodes(node):
            setattr(kid, 'parent', node)
        return super().visit(node)
SyncReport dataclass

Report of variables synced from namespace to pipeline

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
@dataclass
class SyncReport:
    """Report of variables synced from namespace to pipeline"""
    added: dict[str, str]
    skipped: dict[str, str]  # var_name -> reason
    errors: dict[str, str]  # var_name -> error message

    def __str__(self) -> str:
        # Each section contributes a header plus one bullet per entry;
        # non-first headers carry a leading newline to visually separate.
        lines: list[str] = []
        for header, mapping in (
            ("Added variables:", self.added),
            ("\nSkipped variables:", self.skipped),
            ("\nErrors:", self.errors),
        ):
            if mapping:
                lines.append(header)
                lines.extend(f"  - {key}: {text}" for key, text in mapping.items())
        return "\n".join(lines)
TeeStream

Stream that writes to both console and buffer

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
446
447
448
449
450
451
452
453
454
455
456
457
458
459
class TeeStream:
    """File-like object that duplicates every write to a console stream and a buffer."""

    def __init__(self, console_stream, buffer_stream):
        self.console_stream = console_stream
        self.buffer_stream = buffer_stream

    def write(self, data):
        # Mirror the payload to both sinks, then flush the console so the
        # user sees output immediately.
        for sink in (self.console_stream, self.buffer_stream):
            sink.write(data)
        self.console_stream.flush()

    def flush(self):
        self.console_stream.flush()
        self.buffer_stream.flush()
ToolsInterface

Minimalistic tools interface for LLMs providing code execution, virtual file system, and browser interaction capabilities.

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
class ToolsInterface:
    """
    Minimalistic tools interface for LLMs providing code execution,
    virtual file system, and browser interaction capabilities.
    """

    def __init__(self,
                 session_dir: str | None = None,
                 auto_remove: bool = True,
                 variables: dict[str, Any] | None = None,
                 variable_manager: Any | None = None):
        """
        Initialize the tools interface.

        Args:
            session_dir: Directory for session storage; when omitted, an
                app-data '.tools_sessions' directory is used
            auto_remove: Whether to auto-remove temporary files
            variables: Initial variables merged into the Python namespace
            variable_manager: External variable manager instance (optional)
        """
        self._session_dir = Path(session_dir) if session_dir else Path(get_app().appdata) / '.tools_sessions'
        self._session_dir.mkdir(exist_ok=True)
        self.auto_remove = auto_remove
        self.variable_manager = variable_manager

        # Initialize Python execution environment
        self.ipython = MockIPython(self._session_dir, auto_remove=auto_remove)
        if variables:
            self.ipython.user_ns.update(variables)

        # Initialize virtual file system
        self.vfs = VirtualFileSystem(self._session_dir / 'virtual_fs')

        # Initialize Rust interface
        self.cargo = CargoRustInterface(self._session_dir, auto_remove=auto_remove)

        # Track execution state
        self._execution_history = []
        self._current_file = None

    async def execute_python(self, code: str) -> str:
        """
        Execute Python code in the virtual environment.

        Args:
            code: Python code to execute

        Returns:
            Execution result as string
        """
        try:
            result = await self.ipython.run_cell(code, live_output=False)

            # Mirror interpreter variables into the external variable manager.
            if self.variable_manager:
                for key, value in self.ipython.user_ns.items():
                    if not key.startswith('_') and key not in ['__name__', '__builtins__']:
                        try:
                            self.variable_manager.set(f"python.{key}", value)
                        except Exception:
                            # Narrowed from a bare except: still skips
                            # non-serializable variables, but lets
                            # KeyboardInterrupt/SystemExit propagate.
                            pass

            self._execution_history.append(('python', code, result))
            return str(result) if result else "Execution completed"

        except Exception as e:
            error_msg = f"Python execution error: {str(e)}\n{traceback.format_exc()}"
            self._execution_history.append(('python', code, error_msg))
            return error_msg

    async def execute_rust(self, code: str) -> str:
        """
        Execute Rust code using Cargo.

        Args:
            code: Rust code to execute

        Returns:
            Execution result as string
        """
        try:
            # Setup project if needed
            if not self.cargo.current_project:
                await self.cargo.setup_project("temp_rust_project")

            result = await self.cargo.run_code(code)
            self._execution_history.append(('rust', code, result))
            return result

        except Exception as e:
            error_msg = f"Rust execution error: {str(e)}"
            self._execution_history.append(('rust', code, error_msg))
            return error_msg

    async def write_file(self, filepath: str, content: str) -> str:
        """
        Write content to a file in the virtual file system.

        Args:
            filepath: Path to the file
            content: Content to write

        Returns:
            Success message
        """
        try:
            abs_path = self.vfs.write_file(filepath, content)

            # Update variable manager if available
            if self.variable_manager:
                self.variable_manager.set(f"files.{filepath.replace('/', '.')}", {
                    'path': str(abs_path),
                    'size': len(content),
                    'content_preview': content[:100] + '...' if len(content) > 100 else content
                })

            return f"File written successfully: {abs_path}"

        except Exception as e:
            return f"File write error: {str(e)}"

    async def replace_in_file(self, filepath: str, old_content: str, new_content: str, precise: bool = True) -> str:
        """
        Replace exact content in file with new content.

        Args:
            filepath: Path to the file
            old_content: Exact content to replace (empty string for insertion at start)
            new_content: Content to replace with
            precise: If True, requires exact match; if False, allows single occurrence replacement

        Returns:
            Success message or error
        """
        try:
            # Read current file content
            try:
                current_content = self.vfs.read_file(filepath)
            except Exception:
                # Narrowed from a bare except; missing/unreadable file.
                return f"Error: File '{filepath}' not found or cannot be read"

            # Handle insertion at start (empty old_content)
            if not old_content:
                updated_content = new_content + current_content
                self.vfs.write_file(filepath, updated_content)
                return f"Content inserted at start of '{filepath}'"

            # Check if old_content exists
            if old_content not in current_content:
                return f"Error: Old content not found in '{filepath}' use read_file to check."

            # Count occurrences
            occurrences = current_content.count(old_content)

            if precise and occurrences > 1:
                return f"Error: Found {occurrences} occurrences of old content. Use precise=False to replace first occurrence."

            # Replace content (first occurrence if multiple)
            updated_content = current_content.replace(old_content, new_content, 1)

            # Write updated content
            self.vfs.write_file(filepath, updated_content)

            return f"Successfully replaced content in '{filepath}' ({occurrences} occurrence{'s' if occurrences > 1 else ''} found, 1 replaced)"

        except Exception as e:
            return f"Replace error: {str(e)}"

    async def read_file(self, filepath: str) -> str:
        """
        Read content from a file in the virtual file system.

        Args:
            filepath: Path to the file

        Returns:
            File content or error message
        """
        try:
            content = self.vfs.read_file(filepath)

            # Update variable manager if available
            if self.variable_manager:
                self.variable_manager.set("files.last_read", {
                    'path': filepath,
                    'size': len(content),
                    'content_preview': content[:200] + '...' if len(content) > 200 else content
                })

            return content

        except Exception as e:
            return f"File read error: {str(e)}"

    async def list_directory(self, dirpath: str = '.') -> str:
        """
        List contents of a directory.

        Args:
            dirpath: Directory path to list

        Returns:
            Directory listing as string
        """
        try:
            contents = self.vfs.list_directory(dirpath)
            listing = "\n".join(f"- {item}" for item in contents)

            # Update variable manager if available
            if self.variable_manager:
                self.variable_manager.set("files.last_listing", {
                    'directory': dirpath,
                    'items': contents,
                    'count': len(contents)
                })

            return f"Directory '{dirpath}' contents:\n{listing}"

        except Exception as e:
            return f"Directory listing error: {str(e)}"

    async def create_directory(self, dirpath: str) -> str:
        """
        Create a new directory.

        Args:
            dirpath: Path of directory to create

        Returns:
            Success message
        """
        try:
            abs_path = self.vfs.create_directory(dirpath)
            return f"Directory created successfully: {abs_path}"

        except Exception as e:
            return f"Directory creation error: {str(e)}"

    async def set_base_directory(self, path: str) -> str:
        """
        Set the base directory for the virtual file system.

        Args:
            path: New base directory path

        Returns:
            Success message
        """
        try:
            new_path = Path(path)
            new_path.mkdir(parents=True, exist_ok=True)
            self.vfs.base_dir = new_path
            self.vfs.current_dir = new_path

            return f"Base directory set to: {new_path}"

        except Exception as e:
            return f"Set base directory error: {str(e)}"

    async def set_current_file(self, filepath: str) -> str:
        """
        Set the current file for Python execution context.

        Args:
            filepath: Path to set as current file

        Returns:
            Success message
        """
        try:
            abs_path = self.vfs._resolve_path(filepath)
            self.ipython.user_ns['__file__'] = str(abs_path)
            self._current_file = str(abs_path)

            return f"Current file set to: {abs_path}"

        except Exception as e:
            return f"Set current file error: {str(e)}"

    async def install_package(self, package_name: str, version: str | None = None) -> str:
        """
        Install a Python package in the virtual environment.

        Args:
            package_name: Name of the package to install
            version: Optional specific version to install

        Returns:
            Installation result
        """
        try:
            # Import uses the base name (extras like 'pkg[extra]' stripped).
            code = f"""
auto_install('{package_name}'{f", version='{version}'" if version else ""})
import {package_name.split('[')[0]}  # Import base package name
print(f"Successfully imported {package_name}")
"""
            result = await self.execute_python(code)
            return result

        except Exception as e:
            return f"Package installation error: {str(e)}"

    async def get_execution_history(self) -> str:
        """
        Get the execution history.

        Returns:
            Execution history as formatted string (last 10 entries)
        """
        if not self._execution_history:
            return "No execution history available."

        history_lines = []
        for i, (lang, code, result) in enumerate(self._execution_history[-10:], 1):
            history_lines.append(f"[{i}] {lang.upper()}:")
            history_lines.append(f"    Code: {code[:100]}..." if len(code) > 100 else f"    Code: {code}")
            history_lines.append(
                f"    Result: {str(result)[:200]}..." if len(str(result)) > 200 else f"    Result: {result}")
            history_lines.append("")

        return "\n".join(history_lines)

    async def clear_session(self) -> str:
        """
        Clear the current session (variables, history, files).

        Returns:
            Success message
        """
        try:
            # Reset Python environment
            self.ipython.reset()

            # Clear execution history
            self._execution_history.clear()

            # Clear VFS if auto_remove is enabled
            if self.auto_remove:
                shutil.rmtree(self.vfs.base_dir, ignore_errors=True)
                self.vfs.base_dir.mkdir(parents=True, exist_ok=True)
                self.vfs.virtual_files.clear()

            # Reset current file
            self._current_file = None

            return "Session cleared successfully"

        except Exception as e:
            return f"Clear session error: {str(e)}"

    async def get_variables(self) -> str:
        """
        Get current variables in JSON format.

        Returns:
            Variables as JSON string
        """
        try:
            # Get Python variables
            py_vars = {}
            for key, value in self.ipython.user_ns.items():
                if not key.startswith('_') and key not in ['__name__', '__builtins__']:
                    try:
                        # Try to serialize the value
                        json.dumps(value, default=str)
                        # Hoisted: compute str(value) once instead of twice.
                        text = str(value)
                        py_vars[key] = text[:200] if len(text) > 200 else value
                    except Exception:
                        # Narrowed from a bare except.
                        py_vars[key] = f"<{type(value).__name__}>"

            result = {
                'python_variables': py_vars,
                'current_file': self._current_file,
                'vfs_base': str(self.vfs.base_dir),
                'execution_count': len(self._execution_history)
            }

            return json.dumps(result, indent=2, default=str)

        except Exception as e:
            return f"Get variables error: {str(e)}"

    def get_tools(self) -> list[tuple[Any, str, str]]:
        """
        Get all available tools as list of tuples (function, name, description).

        Returns:
            List of tool tuples
        """
        tools = [
            # Code execution tools
            (self.execute_python, "execute_python",
             "Execute Python code in virtual environment. Args: code (str) -> str"),

            (self.execute_rust, "execute_rust",
             "Execute Rust code using Cargo. Args: code (str) -> str"),

            # File system tools
            (self.write_file, "write_file",
             "Write content to file in virtual filesystem. Args: filepath (str), content (str) -> str"),

            (self.write_file, "create_file",
             "Write content to file in virtual filesystem. Args: filepath (str), content (str) -> str"),

            (self.replace_in_file, "replace_in_file",
             "Replace exact content in file. Args: filepath (str), old_content (str), new_content (str), precise (bool) = True -> str"),

            (self.read_file, "read_file",
             "Read content from file in virtual filesystem. Args: filepath (str) -> str"),

            (self.list_directory, "list_directory",
             "List directory contents. Args: dirpath (str) = '.' -> str"),

            (self.create_directory, "create_directory",
             "Create new directory. Args: dirpath (str) -> str"),

            # Configuration tools
            (self.set_base_directory, "set_base_directory",
             "Set base directory for virtual filesystem. Args: path (str) -> str"),

            (self.set_current_file, "set_current_file",
             "Set current file for Python execution context. Args: filepath (str) -> str"),

            (self.install_package, "install_package",
             "Install Python package. Args: package_name (str), version (Optional[str]) -> str"),

            # Session management tools
            (self.get_execution_history, "get_execution_history",
             "Get execution history. Args: None -> str"),

            (self.clear_session, "clear_session",
             "Clear current session. Args: None -> str"),

            (self.get_variables, "get_variables",
             "Get current variables as JSON. Args: None -> str"),
        ]

        return tools

    async def __aenter__(self):
        # Fixed: must be a coroutine so `async with ToolsInterface(...)`
        # works; the previous synchronous definition raised TypeError when
        # awaited by the `async with` protocol (its partner __aexit__ was
        # already async).
        return self

    async def __aexit__(self, *exe):
        await asyncio.sleep(0.01)
__init__(session_dir=None, auto_remove=True, variables=None, variable_manager=None)

Initialize the tools interface.

Parameters:

Name Type Description Default
session_dir str | None

Directory for session storage

None
auto_remove bool

Whether to auto-remove temporary files

True
variables dict[str, Any] | None

Initial variables dictionary

None
variable_manager Any | None

External variable manager instance

None
web_llm

LLM model for web interactions. (Note: this parameter is listed here but does not appear in the `__init__` signature above — it is stale documentation and is not accepted by the constructor.)

required
Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
def __init__(self,
             session_dir: str | None = None,
             auto_remove: bool = True,
             variables: dict[str, Any] | None = None,
             variable_manager: Any | None = None):
    """
    Initialize the tools interface.

    Args:
        session_dir: Directory for session storage; when omitted, an
            app-data '.tools_sessions' directory is used
        auto_remove: Whether to auto-remove temporary files
        variables: Initial variables merged into the Python namespace
        variable_manager: External variable manager instance (optional)
    """
    # Session storage root; created on first use if missing.
    self._session_dir = Path(session_dir) if session_dir else Path(get_app().appdata) / '.tools_sessions'
    self._session_dir.mkdir(exist_ok=True)
    self.auto_remove = auto_remove
    self.variable_manager = variable_manager

    # Initialize Python execution environment
    self.ipython = MockIPython(self._session_dir, auto_remove=auto_remove)
    if variables:
        self.ipython.user_ns.update(variables)

    # Initialize virtual file system
    self.vfs = VirtualFileSystem(self._session_dir / 'virtual_fs')

    # Initialize Rust interface
    self.cargo = CargoRustInterface(self._session_dir, auto_remove=auto_remove)

    # Track execution state
    self._execution_history = []
    self._current_file = None
clear_session() async

Clear the current session (variables, history, files).

Returns:

Type Description
str

Success message

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
async def clear_session(self) -> str:
    """
    Clear the current session (variables, history, files).

    Returns:
        Success message
    """
    try:
        # Fresh interpreter namespace, empty history.
        self.ipython.reset()
        del self._execution_history[:]

        # Wipe and recreate the VFS sandbox when auto-remove is on.
        if self.auto_remove:
            shutil.rmtree(self.vfs.base_dir, ignore_errors=True)
            self.vfs.base_dir.mkdir(parents=True, exist_ok=True)
            self.vfs.virtual_files.clear()

        self._current_file = None
    except Exception as e:
        return f"Clear session error: {str(e)}"
    return "Session cleared successfully"
create_directory(dirpath) async

Create a new directory.

Parameters:

Name Type Description Default
dirpath str

Path of directory to create

required

Returns:

Type Description
str

Success message

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
async def create_directory(self, dirpath: str) -> str:
    """
    Create a new directory in the virtual file system.

    Args:
        dirpath: Path of directory to create

    Returns:
        Success message
    """
    try:
        created = self.vfs.create_directory(dirpath)
    except Exception as e:
        return f"Directory creation error: {str(e)}"
    return f"Directory created successfully: {created}"
execute_python(code) async

Execute Python code in the virtual environment.

Parameters:

Name Type Description Default
code str

Python code to execute

required

Returns:

Type Description
str

Execution result as string

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
async def execute_python(self, code: str) -> str:
    """
    Execute Python code in the virtual environment.

    Args:
        code: Python code to execute

    Returns:
        Execution result as string
    """
    try:
        result = await self.ipython.run_cell(code, live_output=False)

        # Mirror user-namespace variables into the variable manager so other
        # tools can inspect them; private/dunder names are skipped.
        if self.variable_manager:
            for key, value in self.ipython.user_ns.items():
                if not key.startswith('_') and key not in ['__name__', '__builtins__']:
                    try:
                        self.variable_manager.set(f"python.{key}", value)
                    except Exception:
                        # Fix: narrowed from a bare `except:` so that
                        # KeyboardInterrupt/SystemExit are not swallowed.
                        pass  # Ignore non-serializable variables

        self._execution_history.append(('python', code, result))
        return str(result) if result else "Execution completed"

    except Exception as e:
        error_msg = f"Python execution error: {str(e)}\n{traceback.format_exc()}"
        self._execution_history.append(('python', code, error_msg))
        return error_msg
execute_rust(code) async

Execute Rust code using Cargo.

Parameters:

Name Type Description Default
code str

Rust code to execute

required

Returns:

Type Description
str

Execution result as string

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
async def execute_rust(self, code: str) -> str:
    """
    Execute Rust code using Cargo.

    Args:
        code: Rust code to execute

    Returns:
        Execution result as string
    """
    try:
        # Lazily create a scratch Cargo project on first use.
        if not self.cargo.current_project:
            await self.cargo.setup_project("temp_rust_project")

        output = await self.cargo.run_code(code)
        self._execution_history.append(('rust', code, output))
        return output
    except Exception as e:
        failure = f"Rust execution error: {str(e)}"
        self._execution_history.append(('rust', code, failure))
        return failure
get_execution_history() async

Get the execution history.

Returns:

Type Description
str

Execution history as formatted string

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
async def get_execution_history(self) -> str:
    """
    Get the execution history.

    Only the ten most recent entries are reported; code is truncated to
    100 characters and results to 200.

    Returns:
        Execution history as formatted string
    """
    if not self._execution_history:
        return "No execution history available."

    lines = []
    for idx, (lang, code, result) in enumerate(self._execution_history[-10:], 1):
        shown_code = f"{code[:100]}..." if len(code) > 100 else code
        shown_result = f"{str(result)[:200]}..." if len(str(result)) > 200 else result
        lines.append(f"[{idx}] {lang.upper()}:")
        lines.append(f"    Code: {shown_code}")
        lines.append(f"    Result: {shown_result}")
        lines.append("")

    return "\n".join(lines)
get_tools()

Get all available tools as list of tuples (function, name, description).

Returns:

Type Description
list[tuple[Any, str, str]]

List of tool tuples

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
def get_tools(self) -> list[tuple[Any, str, str]]:
    """
    Get all available tools as list of tuples (function, name, description).

    Returns:
        List of tool tuples
    """
    registry: list[tuple[Any, str, str]] = []

    # Code execution tools
    registry += [
        (self.execute_python, "execute_python",
         "Execute Python code in virtual environment. Args: code (str) -> str"),
        (self.execute_rust, "execute_rust",
         "Execute Rust code using Cargo. Args: code (str) -> str"),
    ]

    # File system tools ("create_file" is a deliberate alias of write_file)
    registry += [
        (self.write_file, "write_file",
         "Write content to file in virtual filesystem. Args: filepath (str), content (str) -> str"),
        (self.write_file, "create_file",
         "Write content to file in virtual filesystem. Args: filepath (str), content (str) -> str"),
        (self.replace_in_file, "replace_in_file",
         "Replace exact content in file. Args: filepath (str), old_content (str), new_content (str), precise (bool) = True -> str"),
        (self.read_file, "read_file",
         "Read content from file in virtual filesystem. Args: filepath (str) -> str"),
        (self.list_directory, "list_directory",
         "List directory contents. Args: dirpath (str) = '.' -> str"),
        (self.create_directory, "create_directory",
         "Create new directory. Args: dirpath (str) -> str"),
    ]

    # Configuration tools
    registry += [
        (self.set_base_directory, "set_base_directory",
         "Set base directory for virtual filesystem. Args: path (str) -> str"),
        (self.set_current_file, "set_current_file",
         "Set current file for Python execution context. Args: filepath (str) -> str"),
        (self.install_package, "install_package",
         "Install Python package. Args: package_name (str), version (Optional[str]) -> str"),
    ]

    # Session management tools
    registry += [
        (self.get_execution_history, "get_execution_history",
         "Get execution history. Args: None -> str"),
        (self.clear_session, "clear_session",
         "Clear current session. Args: None -> str"),
        (self.get_variables, "get_variables",
         "Get current variables as JSON. Args: None -> str"),
    ]

    return registry
get_variables() async

Get current variables in JSON format.

Returns:

Type Description
str

Variables as JSON string

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
async def get_variables(self) -> str:
    """
    Get current variables in JSON format.

    Returns:
        Variables as JSON string
    """
    try:
        # Collect user-defined Python variables; private/dunder names skipped.
        py_vars = {}
        for key, value in self.ipython.user_ns.items():
            if not key.startswith('_') and key not in ['__name__', '__builtins__']:
                try:
                    # Probe that the value is representable; long values are
                    # truncated to keep the report small.
                    json.dumps(value, default=str)
                    py_vars[key] = str(value)[:200] if len(str(value)) > 200 else value
                except Exception:
                    # Fix: narrowed from a bare `except:` so that
                    # KeyboardInterrupt/SystemExit are not swallowed.
                    py_vars[key] = f"<{type(value).__name__}>"

        result = {
            'python_variables': py_vars,
            'current_file': self._current_file,
            'vfs_base': str(self.vfs.base_dir),
            'execution_count': len(self._execution_history)
        }

        return json.dumps(result, indent=2, default=str)

    except Exception as e:
        return f"Get variables error: {str(e)}"
install_package(package_name, version=None) async

Install a Python package in the virtual environment.

Parameters:

Name Type Description Default
package_name str

Name of the package to install

required
version str | None

Optional specific version to install

None

Returns:

Type Description
str

Installation result

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
    async def install_package(self, package_name: str, version: str | None = None) -> str:
        """
        Install a Python package in the virtual environment.

        Args:
            package_name: Name of the package to install (may include extras,
                e.g. ``"requests[socks]"`` — the bracket part is stripped for
                the verification import)
            version: Optional specific version to install

        Returns:
            Installation result
        """
        try:
            # Delegate to the sandboxed interpreter: `auto_install` (available
            # in the execution namespace) performs the install, then the
            # generated snippet imports the package to verify it worked.
            # NOTE(review): assumes the pip distribution name equals the
            # import name — not true for e.g. pyyaml/yaml; confirm intent.
            # NOTE(review): package_name/version are interpolated into
            # generated source unquoted beyond the f-string — assumes
            # trusted tool input.
            code = f"""
auto_install('{package_name}'{f", version='{version}'" if version else ""})
import {package_name.split('[')[0]}  # Import base package name
print(f"Successfully imported {package_name}")
"""
            result = await self.execute_python(code)
            return result

        except Exception as e:
            return f"Package installation error: {str(e)}"
list_directory(dirpath='.') async

List contents of a directory.

Parameters:

Name Type Description Default
dirpath str

Directory path to list

'.'

Returns:

Type Description
str

Directory listing as string

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
async def list_directory(self, dirpath: str = '.') -> str:
    """
    List contents of a directory.

    Args:
        dirpath: Directory path to list

    Returns:
        Directory listing as string
    """
    try:
        entries = self.vfs.list_directory(dirpath)
        bullet_lines = "\n".join(f"- {entry}" for entry in entries)

        # Mirror the listing into the variable manager, when present.
        if self.variable_manager:
            self.variable_manager.set("files.last_listing", {
                'directory': dirpath,
                'items': entries,
                'count': len(entries)
            })

        return f"Directory '{dirpath}' contents:\n{bullet_lines}"
    except Exception as e:
        return f"Directory listing error: {str(e)}"
read_file(filepath) async

Read content from a file in the virtual file system.

Parameters:

Name Type Description Default
filepath str

Path to the file

required

Returns:

Type Description
str

File content or error message

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
async def read_file(self, filepath: str) -> str:
    """
    Read content from a file in the virtual file system.

    Args:
        filepath: Path to the file

    Returns:
        File content or error message
    """
    try:
        text = self.vfs.read_file(filepath)

        # Record read metadata in the variable manager, when present.
        if self.variable_manager:
            preview = text[:200] + '...' if len(text) > 200 else text
            self.variable_manager.set("files.last_read", {
                'path': filepath,
                'size': len(text),
                'content_preview': preview
            })

        return text
    except Exception as e:
        return f"File read error: {str(e)}"
replace_in_file(filepath, old_content, new_content, precise=True) async

Replace exact content in file with new content.

Parameters:

Name Type Description Default
filepath str

Path to the file

required
old_content str

Exact content to replace (empty string for insertion at start)

required
new_content str

Content to replace with

required
precise bool

If True, requires exact match; if False, allows single occurrence replacement

True

Returns:

Type Description
str

Success message or error

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
async def replace_in_file(self, filepath: str, old_content: str, new_content: str, precise: bool = True) -> str:
    """
    Replace exact content in file with new content.

    Args:
        filepath: Path to the file
        old_content: Exact content to replace (empty string for insertion at start)
        new_content: Content to replace with
        precise: If True, requires exact match; if False, allows single occurrence replacement

    Returns:
        Success message or error
    """
    try:
        # Read current file content.
        # Fix: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed.
        try:
            current_content = self.vfs.read_file(filepath)
        except Exception:
            return f"Error: File '{filepath}' not found or cannot be read"

        # Handle insertion at start (empty old_content)
        if not old_content:
            self.vfs.write_file(filepath, new_content + current_content)
            return f"Content inserted at start of '{filepath}'"

        # Check if old_content exists
        if old_content not in current_content:
            return f"Error: Old content not found in '{filepath}' use read_file to check."

        # Count occurrences; in precise mode an ambiguous match is rejected.
        occurrences = current_content.count(old_content)
        if precise and occurrences > 1:
            return f"Error: Found {occurrences} occurrences of old content. Use precise=False to replace first occurrence."

        # Replace only the first occurrence, then persist.
        updated_content = current_content.replace(old_content, new_content, 1)
        self.vfs.write_file(filepath, updated_content)

        return f"Successfully replaced content in '{filepath}' ({occurrences} occurrence{'s' if occurrences > 1 else ''} found, 1 replaced)"

    except Exception as e:
        return f"Replace error: {str(e)}"
set_base_directory(path) async

Set the base directory for the virtual file system.

Parameters:

Name Type Description Default
path str

New base directory path

required

Returns:

Type Description
str

Success message

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
async def set_base_directory(self, path: str) -> str:
    """
    Set the base directory for the virtual file system.

    Args:
        path: New base directory path

    Returns:
        Success message
    """
    try:
        target = Path(path)
        # Ensure the directory exists before pointing the VFS at it.
        target.mkdir(parents=True, exist_ok=True)
        self.vfs.base_dir = target
        self.vfs.current_dir = target
        return f"Base directory set to: {target}"
    except Exception as e:
        return f"Set base directory error: {str(e)}"
set_current_file(filepath) async

Set the current file for Python execution context.

Parameters:

Name Type Description Default
filepath str

Path to set as current file

required

Returns:

Type Description
str

Success message

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
async def set_current_file(self, filepath: str) -> str:
    """
    Set the current file for Python execution context.

    Args:
        filepath: Path to set as current file

    Returns:
        Success message
    """
    try:
        resolved = str(self.vfs._resolve_path(filepath))
        # Expose __file__ to executed code and remember it for the session.
        self.ipython.user_ns['__file__'] = resolved
        self._current_file = resolved
        return f"Current file set to: {resolved}"
    except Exception as e:
        return f"Set current file error: {str(e)}"
write_file(filepath, content) async

Write content to a file in the virtual file system.

Parameters:

Name Type Description Default
filepath str

Path to the file

required
content str

Content to write

required

Returns:

Type Description
str

Success message

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
async def write_file(self, filepath: str, content: str) -> str:
    """
    Write content to a file in the virtual file system.

    Args:
        filepath: Path to the file
        content: Content to write

    Returns:
        Success message
    """
    try:
        written_path = self.vfs.write_file(filepath, content)

        # Mirror file metadata into the variable manager, when present.
        if self.variable_manager:
            preview = content[:100] + '...' if len(content) > 100 else content
            self.variable_manager.set(f"files.{filepath.replace('/', '.')}", {
                'path': str(written_path),
                'size': len(content),
                'content_preview': preview
            })

        return f"File written successfully: {written_path}"
    except Exception as e:
        return f"File write error: {str(e)}"
VirtualEnvContext

Context manager for temporary virtual environment activation

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
class VirtualEnvContext:
    """Context manager for temporary virtual environment activation"""

    def __init__(self, venv_path: Path):
        self.venv_path = venv_path
        self._original_path = None
        self._original_sys_path = None
        self._original_prefix = None
        self._original_virtual_env = None

    def _get_venv_paths(self):
        """Get virtual environment paths based on platform"""
        if sys.platform == 'win32':
            bin_dir = self.venv_path / 'Scripts'
            return self.venv_path / 'Lib' / 'site-packages', bin_dir, bin_dir / 'python.exe'
        version_tag = f'python{sys.version_info.major}.{sys.version_info.minor}'
        bin_dir = self.venv_path / 'bin'
        return self.venv_path / 'lib' / version_tag / 'site-packages', bin_dir, bin_dir / 'python'

    def __enter__(self):
        # Snapshot process state so __exit__ can restore it exactly.
        self._original_path = os.environ.get('PATH', '')
        self._original_sys_path = sys.path.copy()
        self._original_prefix = sys.prefix
        self._original_virtual_env = os.environ.get('VIRTUAL_ENV')

        packages_dir, bin_dir, interpreter = self._get_venv_paths()

        # Only graft paths that actually exist onto the environment.
        if bin_dir.exists():
            os.environ['PATH'] = os.pathsep.join([str(bin_dir), self._original_path])
        if packages_dir.exists():
            sys.path.insert(0, str(packages_dir))

        os.environ['VIRTUAL_ENV'] = str(self.venv_path)

        # Hand back the interpreter path for potential subprocess calls.
        return str(interpreter)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore the saved process state.
        os.environ['PATH'] = self._original_path
        sys.path = self._original_sys_path

        if self._original_virtual_env is None:
            os.environ.pop('VIRTUAL_ENV', None)
        else:
            os.environ['VIRTUAL_ENV'] = self._original_virtual_env
VirtualFileSystem
Source code in toolboxv2/mods/isaa/CodingAgent/live.py
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
class VirtualFileSystem:
    def __init__(self, base_dir: Path):
        self.base_dir = base_dir
        self.current_dir = base_dir
        self.virtual_files: dict[str, str] = {}
        self.base_dir.mkdir(parents=True, exist_ok=True)

    def write_file(self, filepath: str | Path, content: str) -> Path:
        """Write content to a virtual file and persist to disk using UTF-8"""
        try:
            abs_path = self._resolve_path(filepath)
        except ValueError:
            print("invalid :", filepath)
            filepath = "src/temp_js/_temp_fix.py"
            abs_path = self._resolve_path(filepath)
        abs_path.parent.mkdir(parents=True, exist_ok=True)

        # Store in virtual filesystem
        rel_path = str(abs_path.relative_to(self.base_dir))
        self.virtual_files[rel_path] = content

        # Write to actual filesystem with UTF-8 encoding
        with open(abs_path, 'w', encoding='utf-8', errors='replace') as f:
            f.write(content)

        return abs_path

    def read_file(self, filepath: str | Path) -> str:
        """Read content from a virtual file using UTF-8"""
        abs_path = self._resolve_path(filepath)
        if not abs_path.exists():
            raise FileNotFoundError(f"File not found: {filepath}")

        rel_path = str(abs_path.relative_to(self.base_dir))

        # Check virtual filesystem first
        if rel_path in self.virtual_files:
            return self.virtual_files[rel_path]

        # Fall back to reading from disk with UTF-8 encoding
        with open(abs_path, encoding='utf-8', errors='replace') as f:
            content = f.read()
            self.virtual_files[rel_path] = content
            return content

    def delete_file(self, filepath: str | Path):
        """Delete a virtual file"""
        abs_path = self._resolve_path(filepath)
        rel_path = str(abs_path.relative_to(self.base_dir))

        if rel_path in self.virtual_files:
            del self.virtual_files[rel_path]

        if abs_path.exists():
            abs_path.unlink()

    def create_directory(self, dirpath: str | Path):
        """Create a new directory"""
        abs_path = self._resolve_path(dirpath)
        abs_path.mkdir(parents=True, exist_ok=True)
        return abs_path


    def list_directory(self, dirpath: str | Path = '.') -> list:
        """List contents of a directory"""
        abs_path = self._resolve_path(dirpath)
        if not abs_path.exists():
            raise FileNotFoundError(f"Directory not found: {dirpath}")
        return [p.name for p in abs_path.iterdir()]

    def change_directory(self, dirpath: str | Path):
        """Change current working directory"""
        new_dir = self._resolve_path(dirpath)
        if not new_dir.exists() or not new_dir.is_dir():
            raise NotADirectoryError(f"Directory not found: {dirpath}")
        self.current_dir = new_dir

    def _resolve_path(self, filepath: str | Path) -> Path:
        """Convert relative path to absolute path"""
        filepath = Path(filepath)
        if filepath.is_absolute():
            if not str(filepath).startswith(str(self.base_dir)):
                raise ValueError("Path must be within base directory")
            return filepath
        return (self.current_dir / filepath).resolve()

    def save_state(self, state_file: Path):
        """Save virtual filesystem state to disk"""
        state = {
            'current_dir': str(self.current_dir.relative_to(self.base_dir)),
            'virtual_files': self.virtual_files
        }
        with open(state_file, 'w') as f:
            json.dump(state, f)

    def load_state(self, state_file: Path):
        """Load virtual filesystem state from disk"""
        if not state_file.exists():
            return

        with open(state_file) as f:
            state = json.load(f)
            self.current_dir = self.base_dir / state['current_dir']
            self.virtual_files = state['virtual_files']

    def print_file_structure(self, start_path: str | Path = '.', indent: str = ''):
        """Print the file structure starting from the given path"""
        start_path = self._resolve_path(start_path)
        if not start_path.exists():
            s = f"Path not found: {start_path}"
            return s

        s = f"{indent}{start_path.name}/"
        for item in sorted(start_path.iterdir()):
            if item.is_dir():
               s+= self.print_file_structure(item, indent + '  ')
            else:
                s = f"{indent}  {item.name}"
        return s
change_directory(dirpath)

Change current working directory

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
338
339
340
341
342
343
def change_directory(self, dirpath: str | Path):
    """Change current working directory"""
    new_dir = self._resolve_path(dirpath)
    if not new_dir.exists() or not new_dir.is_dir():
        raise NotADirectoryError(f"Directory not found: {dirpath}")
    self.current_dir = new_dir
create_directory(dirpath)

Create a new directory

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
324
325
326
327
328
def create_directory(self, dirpath: str | Path):
    """Create a new directory"""
    abs_path = self._resolve_path(dirpath)
    abs_path.mkdir(parents=True, exist_ok=True)
    return abs_path
delete_file(filepath)

Delete a virtual file

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
313
314
315
316
317
318
319
320
321
322
def delete_file(self, filepath: str | Path):
    """Delete a virtual file"""
    abs_path = self._resolve_path(filepath)
    rel_path = str(abs_path.relative_to(self.base_dir))

    if rel_path in self.virtual_files:
        del self.virtual_files[rel_path]

    if abs_path.exists():
        abs_path.unlink()
list_directory(dirpath='.')

List contents of a directory

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
331
332
333
334
335
336
def list_directory(self, dirpath: str | Path = '.') -> list:
    """List contents of a directory"""
    abs_path = self._resolve_path(dirpath)
    if not abs_path.exists():
        raise FileNotFoundError(f"Directory not found: {dirpath}")
    return [p.name for p in abs_path.iterdir()]
load_state(state_file)

Load virtual filesystem state from disk

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
363
364
365
366
367
368
369
370
371
def load_state(self, state_file: Path):
    """Load virtual filesystem state from disk"""
    # Missing state file is a no-op.
    if not state_file.exists():
        return

    with open(state_file) as fh:
        snapshot = json.load(fh)
    self.current_dir = self.base_dir / snapshot['current_dir']
    self.virtual_files = snapshot['virtual_files']
print_file_structure(start_path='.', indent='')

Print the file structure starting from the given path

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
373
374
375
376
377
378
379
380
381
382
383
384
385
386
def print_file_structure(self, start_path: str | Path = '.', indent: str = ''):
    """Print the file structure starting from the given path"""
    start_path = self._resolve_path(start_path)
    if not start_path.exists():
        s = f"Path not found: {start_path}"
        return s

    s = f"{indent}{start_path.name}/"
    for item in sorted(start_path.iterdir()):
        if item.is_dir():
           s+= self.print_file_structure(item, indent + '  ')
        else:
            s = f"{indent}  {item.name}"
    return s
read_file(filepath)

Read content from a virtual file using UTF-8

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
def read_file(self, filepath: str | Path) -> str:
    """Read content from a virtual file using UTF-8"""
    abs_path = self._resolve_path(filepath)
    if not abs_path.exists():
        raise FileNotFoundError(f"File not found: {filepath}")

    rel_path = str(abs_path.relative_to(self.base_dir))

    # Check virtual filesystem first
    if rel_path in self.virtual_files:
        return self.virtual_files[rel_path]

    # Fall back to reading from disk with UTF-8 encoding
    with open(abs_path, encoding='utf-8', errors='replace') as f:
        content = f.read()
        self.virtual_files[rel_path] = content
        return content
save_state(state_file)

Save virtual filesystem state to disk

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
354
355
356
357
358
359
360
361
def save_state(self, state_file: Path):
    """Save virtual filesystem state to disk"""
    # Persist the cwd (relative to the sandbox root) plus the file cache.
    snapshot = {
        'current_dir': str(self.current_dir.relative_to(self.base_dir)),
        'virtual_files': self.virtual_files
    }
    with open(state_file, 'w') as fh:
        json.dump(snapshot, fh)
write_file(filepath, content)

Write content to a virtual file and persist to disk using UTF-8

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
def write_file(self, filepath: str | Path, content: str) -> Path:
    """Write content to a virtual file and persist to disk using UTF-8"""
    try:
        abs_path = self._resolve_path(filepath)
    except ValueError:
        print("invalid :", filepath)
        filepath = "src/temp_js/_temp_fix.py"
        abs_path = self._resolve_path(filepath)
    abs_path.parent.mkdir(parents=True, exist_ok=True)

    # Store in virtual filesystem
    rel_path = str(abs_path.relative_to(self.base_dir))
    self.virtual_files[rel_path] = content

    # Write to actual filesystem with UTF-8 encoding
    with open(abs_path, 'w', encoding='utf-8', errors='replace') as f:
        f.write(content)

    return abs_path
auto_install(package_name, install_method='pip', upgrade=False, quiet=False, version=None, extra_args=None)

Enhanced auto-install import with version and extra arguments support

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
def auto_install(package_name, install_method='pip', upgrade=False, quiet=False, version=None, extra_args=None):
    '''Import a package, installing it on the fly when the import fails.

    Uses the active virtualenv's interpreter (when $VIRTUAL_ENV is set and
    its executable exists) to run ``python -m <install_method> install``,
    then retries the import.  Returns the imported module, or None when
    installation fails.
    '''
    try:
        return importlib.import_module(package_name)
    except ImportError:
        pass

    # Package not found, prepare for installation.
    print(f"Package '{package_name}' not found. Attempting to install...")
    try:
        # Prefer the virtualenv interpreter when one is active.
        python_exec = sys.executable
        venv_root = os.environ.get('VIRTUAL_ENV')
        if venv_root:
            candidate = (
                Path(venv_root) / 'Scripts' / 'python.exe'
                if sys.platform == 'win32'
                else Path(venv_root) / 'bin' / 'python'
            )
            if candidate.exists():
                python_exec = str(candidate)

        # Build the installation command.
        cmd = [python_exec, "-m", install_method, "install"]
        if upgrade:
            cmd.append("--upgrade")
        cmd.append(f"{package_name}=={version}" if version else package_name)
        if extra_args:
            cmd.extend(extra_args)

        # Run installation; capture output only in quiet mode.
        proc = subprocess.run(cmd, capture_output=quiet, text=True)
        if proc.returncode != 0:
            raise Exception(f"Installation failed: {proc.stderr}")

        print(f"Successfully installed {package_name}")
        return importlib.import_module(package_name)
    except Exception as install_error:
        print(f"Error installing {package_name}: {install_error}")
        return None
sync_globals_to_vars(pipeline, namespace=None, prefix=None, include_types=None, exclude_patterns=None, exclude_private=True, deep_copy=False, only_serializable=False)
Sync global variables or a specific namespace to pipeline variables.

Args:
    pipeline: Pipeline instance to sync variables to
    namespace: Optional dictionary of variables (defaults to globals())
    prefix: Optional prefix for variable names (e.g., 'global_')
    include_types: Only include variables of these types
    exclude_patterns: List of regex patterns to exclude
    exclude_private: Exclude variables starting with underscore
    deep_copy: Create deep copies of variables instead of references
    only_serializable: Only include variables that can be serialized

Returns:
    SyncReport with details about added, skipped and error variables

Usage example:
Basic usage - sync all globals

report = sync_globals_to_vars(pipeline)

Sync only numeric types with prefix

report = sync_globals_to_vars( pipeline, include_types=[int, float], prefix="global_" )

Sync from specific namespace

import numpy as np
namespace = {"arr": np.array([1, 2, 3])}
report = sync_globals_to_vars(pipeline, namespace=namespace)

Sync with deep copy and serialization check

report = sync_globals_to_vars(pipeline, deep_copy=True, only_serializable=True)

Source code in toolboxv2/mods/isaa/CodingAgent/live.py
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
def sync_globals_to_vars(
    pipeline: Any,
    namespace: dict[str, Any] | None = None,
    prefix: str | None = None,
    include_types: type | list[type] | None = None,
    exclude_patterns: list[str] | None = None,
    exclude_private: bool = True,
    deep_copy: bool = False,
    only_serializable: bool = False
) -> SyncReport:
    """
    Sync global variables or a specific namespace to pipeline variables.

    Args:
        pipeline: Pipeline instance to sync variables to
        namespace: Optional dictionary of variables (defaults to the caller's globals())
        prefix: Optional prefix for variable names (e.g., 'global_')
        include_types: Only include variables of these types
        exclude_patterns: List of regex patterns to exclude
        exclude_private: Exclude variables starting with underscore
        deep_copy: Create deep copies of variables instead of references
        only_serializable: Only include variables that can be pickled

    Returns:
        SyncReport with details about added, skipped and error variables

    Usage examples:
        # Basic usage - sync all globals
        report = sync_globals_to_vars(pipeline)

        # Sync only numeric types with prefix
        report = sync_globals_to_vars(pipeline, include_types=[int, float], prefix="global_")

        # Sync from a specific namespace
        import numpy as np
        report = sync_globals_to_vars(pipeline, namespace={"arr": np.array([1, 2, 3])})

        # Sync with deep copy and serialization check
        report = sync_globals_to_vars(pipeline, deep_copy=True, only_serializable=True)
    """
    # Initialize report
    report = SyncReport(
        added={},
        skipped={},
        errors={}
    )

    # Default to the caller's globals. NOTE: this lookup must stay inline (not
    # in a helper) because the frame depth determines whose globals we capture.
    if namespace is None:
        caller_frame = currentframe().f_back
        # FIX: guard against a missing caller frame (e.g. some embedded interpreters)
        namespace = caller_frame.f_globals if caller_frame is not None else {}

    # Compile exclude patterns once, up front
    if exclude_patterns:
        patterns = [re.compile(pattern) for pattern in exclude_patterns]
    else:
        patterns = []

    # Normalize include_types and pre-build the tuple isinstance() needs.
    # FIX: the tuple was previously rebuilt on every loop iteration.
    if include_types and not isinstance(include_types, list | tuple | set):
        include_types = [include_types]
    type_filter = tuple(include_types) if include_types else None

    # FIX: import pickle once, not inside the per-variable loop.
    if only_serializable:
        import pickle

    def get_type_info(var: Any) -> str:
        """Helper to get detailed type information for the report."""
        if isinstance(var, type):
            return f"class '{var.__name__}'"
        elif isinstance(var, BaseModel):
            return f"Pydantic model '{var.__class__.__name__}'"
        elif hasattr(var, '__class__'):
            type_name = var.__class__.__name__
            module_name = var.__class__.__module__
            if module_name != 'builtins':
                return f"{module_name}.{type_name}"
            return type_name
        return type(var).__name__

    # Process each variable; every outcome is recorded in the report.
    for name, value in namespace.items():
        try:
            # Skip if matches exclude criteria
            if exclude_private and name.startswith('_'):
                report.skipped[name] = "private variable"
                continue

            if any(pattern.match(name) for pattern in patterns):
                report.skipped[name] = "matched exclude pattern"
                continue

            if type_filter and not isinstance(value, type_filter):
                report.skipped[name] = f"type {type(value).__name__} not in include_types"
                continue

            # Test serialization if required
            if only_serializable:
                try:
                    pickle.dumps(value)
                except Exception as e:
                    report.skipped[name] = f"not serializable: {str(e)}"
                    continue

            # Prepare variable
            var_value = deepcopy(value) if deep_copy else value
            var_name = f"{prefix}{name}" if prefix else name

            # Add to pipeline variables
            pipeline.variables[var_name] = var_value
            report.added[var_name] = get_type_info(value)

        except Exception as e:
            report.errors[name] = str(e)

    return report

base

Agent
agent
AgentCheckpoint dataclass

Enhanced AgentCheckpoint with UnifiedContextManager and ChatSession integration

Source code in toolboxv2/mods/isaa/base/Agent/types.py
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
@dataclass
class AgentCheckpoint:
    """Enhanced AgentCheckpoint with UnifiedContextManager and ChatSession integration"""
    timestamp: datetime
    agent_state: dict[str, Any]
    task_state: dict[str, Any]
    world_model: dict[str, Any]
    active_flows: list[str]
    metadata: dict[str, Any] = field(default_factory=dict)

    # Enhanced checkpoint data for UnifiedContextManager integration
    session_data: dict[str, Any] = field(default_factory=dict)
    context_manager_state: dict[str, Any] = field(default_factory=dict)
    conversation_history: list[dict[str, Any]] = field(default_factory=list)
    variable_system_state: dict[str, Any] = field(default_factory=dict)
    results_store: dict[str, Any] = field(default_factory=dict)
    tool_capabilities: dict[str, Any] = field(default_factory=dict)
    variable_scopes: dict[str, Any] = field(default_factory=dict)

    # Optional: Additional system state
    performance_metrics: dict[str, Any] = field(default_factory=dict)
    execution_history: list[dict[str, Any]] = field(default_factory=list)

    def get_checkpoint_summary(self) -> str:
        """Get human-readable checkpoint summary.

        Joins one short fragment per populated component; returns
        "Basic checkpoint" when nothing is populated. Never raises.
        """
        try:
            summary_parts = []

            # Sessions (anything not explicitly failed counts)
            if self.session_data:
                session_count = len([s for s in self.session_data.values() if s.get("status") != "failed"])
                summary_parts.append(f"{session_count} sessions")

            # Task progress as completed/total
            if self.task_state:
                completed_tasks = len([t for t in self.task_state.values() if t.get("status") == "completed"])
                total_tasks = len(self.task_state)
                summary_parts.append(f"{completed_tasks}/{total_tasks} tasks")

            # Conversation length
            if self.conversation_history:
                summary_parts.append(f"{len(self.conversation_history)} messages")

            # Context cache entries (only mentioned when non-zero)
            if self.context_manager_state:
                cache_count = self.context_manager_state.get("cache_entries", 0)
                if cache_count > 0:
                    summary_parts.append(f"{cache_count} cached contexts")

            # Variable system scopes
            if self.variable_system_state:
                scopes = len(self.variable_system_state.get("scopes", {}))
                summary_parts.append(f"{scopes} variable scopes")

            # Analyzed tool capabilities
            if self.tool_capabilities:
                summary_parts.append(f"{len(self.tool_capabilities)} analyzed tools")

            return "; ".join(summary_parts) if summary_parts else "Basic checkpoint"

        except Exception as e:
            return f"Summary generation failed: {str(e)}"

    def get_storage_size_estimate(self) -> dict[str, float]:
        """Estimate storage size of different checkpoint components.

        Sizes are approximated via ``len(str(...))`` per component.
        Returns per-component byte counts plus ``total_bytes``,
        ``total_kb`` and ``total_mb`` (the latter two are floats —
        FIX: return annotation was ``dict[str, int]``), or
        ``{"error": ...}`` on failure.
        """
        try:
            sizes = {}

            # Calculate sizes in bytes (approximate)
            sizes["agent_state"] = len(str(self.agent_state))
            sizes["task_state"] = len(str(self.task_state))
            sizes["world_model"] = len(str(self.world_model))
            sizes["conversation_history"] = len(str(self.conversation_history))
            sizes["session_data"] = len(str(self.session_data))
            sizes["context_manager_state"] = len(str(self.context_manager_state))
            sizes["variable_system_state"] = len(str(self.variable_system_state))
            sizes["results_store"] = len(str(self.results_store))
            sizes["tool_capabilities"] = len(str(self.tool_capabilities))

            # Totals must be computed before the total_* keys are inserted.
            sizes["total_bytes"] = sum(sizes.values())
            sizes["total_kb"] = sizes["total_bytes"] / 1024
            sizes["total_mb"] = sizes["total_kb"] / 1024

            return sizes

        except Exception as e:
            return {"error": str(e)}

    def validate_checkpoint_integrity(self) -> dict[str, Any]:
        """Validate checkpoint integrity and completeness.

        Returns a dict with ``is_valid``, ``errors``, ``warnings``,
        ``completeness_score`` (present components / all known components)
        and ``components_present``. Never raises; internal failures are
        appended to ``errors``.
        """
        validation = {
            "is_valid": True,
            "errors": [],
            "warnings": [],
            "completeness_score": 0.0,
            "components_present": []
        }

        try:
            # Required components must exist and be non-None
            required_components = ["timestamp", "agent_state", "task_state", "world_model", "active_flows"]
            for component in required_components:
                if hasattr(self, component) and getattr(self, component) is not None:
                    validation["components_present"].append(component)
                else:
                    validation["errors"].append(f"Missing required component: {component}")
                    validation["is_valid"] = False

            # Optional enhanced components count only when truthy (non-empty)
            enhanced_components = ["session_data", "context_manager_state", "conversation_history",
                                   "variable_system_state", "results_store", "tool_capabilities"]

            for component in enhanced_components:
                if hasattr(self, component) and getattr(self, component):
                    validation["components_present"].append(component)

            # Calculate completeness score
            total_possible = len(required_components) + len(enhanced_components)
            validation["completeness_score"] = len(validation["components_present"]) / total_possible

            # Check timestamp validity; warn on stale (> 24h) checkpoints
            if isinstance(self.timestamp, datetime):
                age_hours = (datetime.now() - self.timestamp).total_seconds() / 3600
                if age_hours > 24:
                    validation["warnings"].append(f"Checkpoint is {age_hours:.1f} hours old")
            else:
                validation["errors"].append("Invalid timestamp format")
                validation["is_valid"] = False

            # Cross-check session IDs between session data and conversation history
            if self.session_data and self.conversation_history:
                session_ids_in_data = set(self.session_data.keys())
                # FIX: set comprehension instead of set(generator) (ruff C401)
                session_ids_in_conversation = {
                    msg.get("session_id") for msg in self.conversation_history
                    if msg.get("session_id")
                }

                if session_ids_in_data != session_ids_in_conversation:
                    validation["warnings"].append("Session data and conversation history session IDs don't match")

            return validation

        except Exception as e:
            validation["errors"].append(f"Validation error: {str(e)}")
            validation["is_valid"] = False
            return validation

    def get_version_info(self) -> dict[str, str]:
        """Get checkpoint version information.

        Classifies the checkpoint's format based on which enhanced
        components are populated.
        """
        return {
            "checkpoint_version": self.metadata.get("checkpoint_version", "1.0"),
            "data_format": "enhanced" if self.session_data or self.context_manager_state else "basic",
            "context_system": "unified" if self.context_manager_state else "legacy",
            "variable_system": "integrated" if self.variable_system_state else "basic",
            "session_management": "chatsession" if self.session_data else "memory_only",
            "created_with": "FlowAgent v2.0 Enhanced Context System"
        }
get_checkpoint_summary()

Get human-readable checkpoint summary

Source code in toolboxv2/mods/isaa/base/Agent/types.py
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
def get_checkpoint_summary(self) -> str:
    """Get human-readable checkpoint summary"""
    try:
        parts: list[str] = []

        # Count sessions that did not end in failure.
        if self.session_data:
            ok_sessions = sum(
                1 for entry in self.session_data.values() if entry.get("status") != "failed"
            )
            parts.append(f"{ok_sessions} sessions")

        # Completed vs. total tasks.
        if self.task_state:
            done = sum(1 for t in self.task_state.values() if t.get("status") == "completed")
            parts.append(f"{done}/{len(self.task_state)} tasks")

        # Conversation length.
        if self.conversation_history:
            parts.append(f"{len(self.conversation_history)} messages")

        # Cached context entries, only when any exist.
        if self.context_manager_state:
            cached = self.context_manager_state.get("cache_entries", 0)
            if cached > 0:
                parts.append(f"{cached} cached contexts")

        # Variable scope count.
        if self.variable_system_state:
            scope_count = len(self.variable_system_state.get("scopes", {}))
            parts.append(f"{scope_count} variable scopes")

        # Analyzed tools.
        if self.tool_capabilities:
            parts.append(f"{len(self.tool_capabilities)} analyzed tools")

        if not parts:
            return "Basic checkpoint"
        return "; ".join(parts)

    except Exception as e:
        return f"Summary generation failed: {str(e)}"
get_storage_size_estimate()

Estimate storage size of different checkpoint components

Source code in toolboxv2/mods/isaa/base/Agent/types.py
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
def get_storage_size_estimate(self) -> dict[str, int]:
    """Estimate storage size of different checkpoint components"""
    try:
        tracked = (
            "agent_state",
            "task_state",
            "world_model",
            "conversation_history",
            "session_data",
            "context_manager_state",
            "variable_system_state",
            "results_store",
            "tool_capabilities",
        )
        # Approximate each component's footprint by the length of its repr.
        sizes = {attr: len(str(getattr(self, attr))) for attr in tracked}

        # Aggregate totals (computed before the total_* keys are added).
        sizes["total_bytes"] = sum(sizes.values())
        sizes["total_kb"] = sizes["total_bytes"] / 1024
        sizes["total_mb"] = sizes["total_kb"] / 1024

        return sizes

    except Exception as e:
        return {"error": str(e)}
get_version_info()

Get checkpoint version information

Source code in toolboxv2/mods/isaa/base/Agent/types.py
686
687
688
689
690
691
692
693
694
695
def get_version_info(self) -> dict[str, str]:
    """Get checkpoint version information"""
    # A checkpoint counts as "enhanced" when either session data or
    # context-manager state was captured.
    enhanced = bool(self.session_data or self.context_manager_state)
    return {
        "checkpoint_version": self.metadata.get("checkpoint_version", "1.0"),
        "data_format": "enhanced" if enhanced else "basic",
        "context_system": "unified" if self.context_manager_state else "legacy",
        "variable_system": "integrated" if self.variable_system_state else "basic",
        "session_management": "chatsession" if self.session_data else "memory_only",
        "created_with": "FlowAgent v2.0 Enhanced Context System"
    }
validate_checkpoint_integrity()

Validate checkpoint integrity and completeness

Source code in toolboxv2/mods/isaa/base/Agent/types.py
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
def validate_checkpoint_integrity(self) -> dict[str, Any]:
    """Validate checkpoint integrity and completeness"""
    validation = {
        "is_valid": True,
        "errors": [],
        "warnings": [],
        "completeness_score": 0.0,
        "components_present": []
    }

    try:
        required = ["timestamp", "agent_state", "task_state", "world_model", "active_flows"]
        optional = ["session_data", "context_manager_state", "conversation_history",
                    "variable_system_state", "results_store", "tool_capabilities"]

        # Required components must exist and be non-None.
        for name in required:
            if getattr(self, name, None) is not None:
                validation["components_present"].append(name)
            else:
                validation["errors"].append(f"Missing required component: {name}")
                validation["is_valid"] = False

        # Optional components count toward completeness only when truthy.
        for name in optional:
            if getattr(self, name, None):
                validation["components_present"].append(name)

        validation["completeness_score"] = (
            len(validation["components_present"]) / (len(required) + len(optional))
        )

        # Timestamp must be a datetime; warn when older than a day.
        if not isinstance(self.timestamp, datetime):
            validation["errors"].append("Invalid timestamp format")
            validation["is_valid"] = False
        else:
            age_hours = (datetime.now() - self.timestamp).total_seconds() / 3600
            if age_hours > 24:
                validation["warnings"].append(f"Checkpoint is {age_hours:.1f} hours old")

        # Cross-check session IDs between session data and conversation history.
        if self.session_data and self.conversation_history:
            ids_from_sessions = set(self.session_data.keys())
            ids_from_messages = {
                msg.get("session_id")
                for msg in self.conversation_history
                if msg.get("session_id")
            }
            if ids_from_sessions != ids_from_messages:
                validation["warnings"].append("Session data and conversation history session IDs don't match")

        return validation

    except Exception as e:
        validation["errors"].append(f"Validation error: {str(e)}")
        validation["is_valid"] = False
        return validation
AgentModelData

Bases: BaseModel

Source code in toolboxv2/mods/isaa/base/Agent/types.py
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
class AgentModelData(BaseModel):
    """Model/configuration settings for a FlowAgent: LLM endpoints, limits and persona."""
    name: str = "FlowAgent"
    fast_llm_model: str = "openrouter/anthropic/claude-3-haiku"
    complex_llm_model: str = "openrouter/openai/gpt-4o"
    system_message: str = "You are a production-ready autonomous agent."
    temperature: float = 0.7
    max_tokens: int = 2048
    max_input_tokens: int = 32768
    api_key: str | None = None
    api_base: str | None = None
    budget_manager: Any = None
    caching: bool = True
    # FIX: the default was `True` (a bool, not a PersonaConfig); with that
    # default, `self.persona.apply_method` below raised AttributeError.
    # `None` (no persona) is the correct "unset" default.
    persona: PersonaConfig | None = None
    use_fast_response: bool = True

    def get_system_message_with_persona(self) -> str:
        """Get system message with persona integration.

        Returns the base system message, extended with the persona's
        prompt addition when a persona is configured and its apply
        method targets the system prompt.
        """
        base_message = self.system_message

        if self.persona and self.persona.apply_method in ["system_prompt", "both"]:
            persona_addition = self.persona.to_system_prompt_addition()
            if persona_addition:
                base_message += f"\n## Persona Instructions\n{persona_addition}"

        return base_message
get_system_message_with_persona()

Get system message with persona integration

Source code in toolboxv2/mods/isaa/base/Agent/types.py
770
771
772
773
774
775
776
777
778
779
def get_system_message_with_persona(self) -> str:
    """Get system message with persona integration"""
    message = self.system_message

    # Append persona instructions only when a persona is set and it is
    # configured to apply via the system prompt.
    persona = self.persona
    if persona and persona.apply_method in ["system_prompt", "both"]:
        addition = persona.to_system_prompt_addition()
        if addition:
            message = message + f"\n## Persona Instructions\n{addition}"

    return message
ChainMetadata dataclass

Metadata for stored chains

Source code in toolboxv2/mods/isaa/base/Agent/types.py
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
@dataclass
class ChainMetadata:
    """Metadata for stored chains.

    Captures identity, provenance and structural traits of a chain so it
    can be listed, versioned and filtered without loading the chain body.
    """
    name: str
    description: str = ""
    # Provenance timestamps; modified_at should be refreshed on every save.
    created_at: datetime = field(default_factory=datetime.now)
    modified_at: datetime = field(default_factory=datetime.now)
    version: str = "1.0.0"
    tags: list[str] = field(default_factory=list)
    author: str = ""
    complexity: str = "simple"  # simple, medium, complex
    # Number of agents participating in the chain.
    agent_count: int = 0
    # Structural feature flags used for filtering/inspection.
    has_conditionals: bool = False
    has_parallels: bool = False
    has_error_handling: bool = False
CompletionCheckerNode

Bases: AsyncNode

Breaks infinite cycles by checking actual completion status

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
@with_progress_tracking
class CompletionCheckerNode(AsyncNode):
    """Breaks infinite cycles by checking actual completion status.

    Inspects the current plan's live task statuses on each cycle and
    decides whether the flow should continue, is truly complete, or must
    be force-terminated. A hard cycle cap guards against infinite loops.
    """

    def __init__(self):
        super().__init__()
        self.execution_count = 0  # cycles since the last true completion
        self.max_cycles = 5  # Prevent infinite loops

    async def prep_async(self, shared):
        """Collect plan and task registry from the shared store for exec_async."""
        current_plan = shared.get("current_plan")
        tasks = shared.get("tasks", {})

        return {
            "current_plan": current_plan,
            "tasks": tasks,
            "execution_count": self.execution_count
        }

    async def exec_async(self, prep_res):
        """Decide the next flow action based on actual task completion status."""
        self.execution_count += 1

        # Safety check: prevent infinite loops
        if self.execution_count > self.max_cycles:
            wprint(f"Max execution cycles ({self.max_cycles}) reached, terminating")
            return {
                "action": "force_terminate",
                "reason": "Max cycles reached"
            }

        current_plan = prep_res["current_plan"]
        tasks = prep_res["tasks"]

        if not current_plan:
            return {"action": "truly_complete", "reason": "No active plan"}

        # Bucket the plan's tasks by their live status from the task registry.
        pending_tasks = [t for t in current_plan.tasks if tasks[t.id].status == "pending"]
        running_tasks = [t for t in current_plan.tasks if tasks[t.id].status == "running"]
        completed_tasks = [t for t in current_plan.tasks if tasks[t.id].status == "completed"]
        failed_tasks = [t for t in current_plan.tasks if tasks[t.id].status == "failed"]

        total_tasks = len(current_plan.tasks)

        # Truly complete: all tasks done
        if len(completed_tasks) + len(failed_tasks) == total_tasks:
            if len(failed_tasks) == 0 or len(completed_tasks) > len(failed_tasks):
                return {"action": "truly_complete", "reason": "All tasks completed"}
            else:
                return {"action": "truly_complete", "reason": "Plan failed but cannot continue"}

        # Has pending tasks that can run
        if pending_tasks and not running_tasks:
            return {"action": "continue_execution", "reason": f"{len(pending_tasks)} tasks ready"}

        # Has running tasks, wait
        if running_tasks:
            return {"action": "continue_execution", "reason": f"{len(running_tasks)} tasks running"}

        # FIX: removed an unreachable duplicate `pending and not running` branch
        # that returned "needs_reflection" — the identical condition above always
        # returned first, so the reflection path was dead code.

        # Default: we're done
        return {"action": "truly_complete", "reason": "No actionable tasks"}

    async def post_async(self, shared, prep_res, exec_res):
        """Record the completion reason in shared state and reset the cycle counter."""
        action = exec_res["action"]

        # Reset counter on true completion
        if action == "truly_complete":
            self.execution_count = 0
            shared["flow_completion_reason"] = exec_res["reason"]
        elif action == "force_terminate":
            self.execution_count = 0
            shared["flow_completion_reason"] = f"Force terminated: {exec_res['reason']}"
            shared["force_terminated"] = True
            wprint(f"Flow force terminated: {exec_res['reason']}")

        return action
ContextAggregatorNode

Bases: AsyncNode

Vereinfachte Context-Aggregation über UnifiedContextManager

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380
@with_progress_tracking
class ContextAggregatorNode(AsyncNode):
    """Simplified context aggregation via UnifiedContextManager.

    Builds the ``aggregated_context`` dict consumed by downstream
    response-generation nodes, either from the UnifiedContextManager or
    from a minimal fallback when none is wired into shared state.
    """

    async def prep_async(self, shared):
        """Collect the shared-state fields needed for aggregation."""
        return {
            "context_manager": shared.get("context_manager"),
            "session_id": shared.get("session_id", "default"),
            "original_query": shared.get("current_query", ""),
            "tasks": shared.get("tasks", {}),
            "current_plan": shared.get("current_plan"),
            "world_model": shared.get("world_model", {}),
            "results": shared.get("results", {})
        }

    async def exec_async(self, prep_res):
        """Get the aggregated context from the UnifiedContextManager.

        Falls back to :meth:`_create_fallback_context` when no manager is
        available or when aggregation raises.
        """
        context_manager = prep_res.get("context_manager")
        session_id = prep_res.get("session_id", "default")
        query = prep_res.get("original_query", "")

        if not context_manager:
            # Fallback: Create basic aggregated context
            return self._create_fallback_context(prep_res)

        try:
            # Get unified context from context manager
            unified_context = await context_manager.build_unified_context(session_id, query, "full")

            # Transform to expected aggregated_context format for compatibility
            aggregated_context = {
                "original_query": query,
                "successful_results": self._extract_successful_results(unified_context),
                "failed_attempts": self._extract_failed_attempts(prep_res["tasks"]),
                "key_discoveries": self._extract_key_discoveries(unified_context),
                "adaptation_summary": self._extract_adaptation_summary(prep_res),
                "confidence_scores": self._calculate_confidence_scores(unified_context),
                "unified_context": unified_context,  # Include full unified context
                "context_source": "unified_context_manager"
            }

            return aggregated_context

        except Exception as e:
            eprint(f"UnifiedContextManager aggregation failed: {e}")
            return self._create_fallback_context(prep_res)

    def _extract_successful_results(self, unified_context: dict[str, Any]) -> dict[str, Any]:
        """Extract successful results from unified context.

        Merges entries from ``variables.recent_results`` (successes only)
        and ``execution_state.recent_completions`` into one mapping keyed
        by task id.
        """
        successful_results = {}

        try:
            # Get from variables context
            variables = unified_context.get("variables", {})
            recent_results = variables.get("recent_results", [])

            for result in recent_results:
                if result.get("success"):
                    task_id = result.get("task_id", f"result_{len(successful_results)}")
                    successful_results[task_id] = {
                        "task_description": f"Task {task_id}",
                        "task_type": "unified_context_result",
                        "result": result.get("preview", ""),
                        "metadata": {
                            "timestamp": result.get("timestamp"),
                            "source": "unified_context"
                        }
                    }

            # Also check execution state for completions
            execution_state = unified_context.get("execution_state", {})
            recent_completions = execution_state.get("recent_completions", [])

            for completion in recent_completions:
                task_id = completion.get("id", f"completion_{len(successful_results)}")
                successful_results[task_id] = {
                    "task_description": completion.get("description", "Completed task"),
                    "task_type": "execution_completion",
                    "result": f"Task completed at {completion.get('completed_at', 'unknown time')}",
                    "metadata": {
                        "completion_time": completion.get("completed_at"),
                        "source": "execution_state"
                    }
                }

            return successful_results

        except Exception as e:
            eprint(f"Error extracting successful results: {e}")
            return {}

    def _extract_failed_attempts(self, tasks: dict) -> dict[str, Any]:
        """Extract failed attempts from tasks (existing functionality).

        Best-effort: any malformed task object yields an empty result
        rather than propagating the error.
        """
        failed_attempts = {}

        try:
            for task_id, task in tasks.items():
                if task.status == "failed":
                    failed_attempts[task_id] = {
                        "description": task.description,
                        "error": task.error,
                        "retry_count": task.retry_count
                    }
            return failed_attempts
        except Exception:
            # was a bare `except:` - narrow so SystemExit/KeyboardInterrupt propagate
            return {}

    def _extract_key_discoveries(self, unified_context: dict[str, Any]) -> list[dict[str, Any]]:
        """Extract key discoveries from unified context.

        Takes the top 3 relevant facts and the top 2 successful recent
        results, each mapped to a discovery entry with a fixed confidence.
        """
        discoveries = []

        try:
            # Extract from relevant facts
            relevant_facts = unified_context.get("relevant_facts", [])
            for key, value in relevant_facts[:3]:  # Top 3 facts
                discoveries.append({
                    "discovery": f"Fact discovered: {key}",
                    "confidence": 0.8,  # Default confidence for facts
                    "result": value
                })

            # Extract from successful results
            variables = unified_context.get("variables", {})
            recent_results = variables.get("recent_results", [])

            for result in recent_results[:2]:  # Top 2 results
                if result.get("success"):
                    discoveries.append({
                        "discovery": f"Task result: {result.get('task_id', 'unknown')}",
                        "confidence": 0.7,
                        "result": result.get("preview", "")
                    })

            return discoveries

        except Exception as e:
            eprint(f"Error extracting discoveries: {e}")
            return []

    def _extract_adaptation_summary(self, prep_res: dict) -> str:
        """Summarize how often the current plan was adapted ("" if never)."""
        try:
            current_plan = prep_res.get("current_plan")
            if current_plan and hasattr(current_plan, 'metadata'):
                adaptations = current_plan.metadata.get("adaptations", 0)
                if adaptations > 0:
                    return f"Plan was adapted {adaptations} times to handle unexpected results."
            return ""
        except Exception:
            # was a bare `except:` - narrow so SystemExit/KeyboardInterrupt propagate
            return ""

    def _calculate_confidence_scores(self, unified_context: dict[str, Any]) -> dict[str, float]:
        """Calculate confidence scores based on unified context.

        Returns per-signal scores plus an "overall" mean; a low default is
        returned if anything goes wrong.
        """
        try:
            scores = {"overall": 0.5}

            # Base confidence on available data
            chat_history = unified_context.get("chat_history", [])
            if chat_history:
                scores["conversation_context"] = min(len(chat_history) / 10, 1.0)

            variables = unified_context.get("variables", {})
            recent_results = variables.get("recent_results", [])
            successful_results = [r for r in recent_results if r.get("success")]

            if recent_results:
                scores["execution_results"] = len(successful_results) / len(recent_results)

            # Calculate overall confidence
            scores["overall"] = sum(scores.values()) / len(scores)

            return scores

        except Exception:
            # was a bare `except:` - narrow so SystemExit/KeyboardInterrupt propagate
            return {"overall": 0.3}

    def _create_fallback_context(self, prep_res: dict) -> dict[str, Any]:
        """Create fallback context when UnifiedContextManager is unavailable."""
        return {
            "original_query": prep_res.get("original_query", ""),
            "successful_results": {},
            "failed_attempts": self._extract_failed_attempts(prep_res.get("tasks", {})),
            "key_discoveries": [],
            "adaptation_summary": "Fallback context - UnifiedContextManager unavailable",
            "confidence_scores": {"overall": 0.2},
            "context_source": "fallback"
        }

    async def post_async(self, shared, prep_res, exec_res):
        """Store aggregated context for downstream nodes."""
        shared["aggregated_context"] = exec_res

        # Also store unified context reference for other nodes
        if "unified_context" in exec_res:
            shared["unified_context"] = exec_res["unified_context"]

        if exec_res.get("successful_results") or exec_res.get("key_discoveries"):
            return "context_ready"
        else:
            return "no_context"
exec_async(prep_res) async

VEREINFACHT: Get aggregated context from UnifiedContextManager

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
async def exec_async(self, prep_res):
    """SIMPLIFIED: Get aggregated context from UnifiedContextManager"""

    context_manager = prep_res.get("context_manager")
    session_id = prep_res.get("session_id", "default")
    query = prep_res.get("original_query", "")

    if not context_manager:
        # Fallback: Create basic aggregated context
        return self._create_fallback_context(prep_res)

    try:
        # Get unified context from context manager
        unified_context = await context_manager.build_unified_context(session_id, query, "full")

        # Transform to expected aggregated_context format for compatibility
        aggregated_context = {
            "original_query": query,
            "successful_results": self._extract_successful_results(unified_context),
            "failed_attempts": self._extract_failed_attempts(prep_res["tasks"]),
            "key_discoveries": self._extract_key_discoveries(unified_context),
            "adaptation_summary": self._extract_adaptation_summary(prep_res),
            "confidence_scores": self._calculate_confidence_scores(unified_context),
            "unified_context": unified_context,  # Include full unified context
            "context_source": "unified_context_manager"
        }

        return aggregated_context

    except Exception as e:
        # Any aggregation failure degrades to the fallback context.
        eprint(f"UnifiedContextManager aggregation failed: {e}")
        return self._create_fallback_context(prep_res)
post_async(shared, prep_res, exec_res) async

Store aggregated context for downstream nodes

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380
async def post_async(self, shared, prep_res, exec_res):
    """Store aggregated context for downstream nodes"""
    shared["aggregated_context"] = exec_res

    # Also store unified context reference for other nodes
    if "unified_context" in exec_res:
        shared["unified_context"] = exec_res["unified_context"]

    # Route based on whether any usable context was produced.
    if exec_res.get("successful_results") or exec_res.get("key_discoveries"):
        return "context_ready"
    else:
        return "no_context"
prep_async(shared) async

Simplified preparation - delegate to UnifiedContextManager

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
async def prep_async(self, shared):
    """Simplified preparation - delegate to UnifiedContextManager"""
    # Snapshot only the shared-state fields the aggregation step reads.
    return {
        "context_manager": shared.get("context_manager"),
        "session_id": shared.get("session_id", "default"),
        "original_query": shared.get("current_query", ""),
        "tasks": shared.get("tasks", {}),
        "current_plan": shared.get("current_plan"),
        "world_model": shared.get("world_model", {}),
        "results": shared.get("results", {})
    }
DecisionTask dataclass

Bases: Task

Task für dynamisches Routing

Source code in toolboxv2/mods/isaa/base/Agent/types.py
493
494
495
496
497
498
@dataclass
class DecisionTask(Task):
    """Task for dynamic routing."""
    decision_prompt: str = ""  # short question posed to the LLM
    routing_map: dict[str, str] = field(default_factory=dict)  # decision result -> next task
    decision_model: str = "fast"  # which LLM tier to use for the decision
FlowAgent

Production-ready agent system built on PocketFlow

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
 8004
 8005
 8006
 8007
 8008
 8009
 8010
 8011
 8012
 8013
 8014
 8015
 8016
 8017
 8018
 8019
 8020
 8021
 8022
 8023
 8024
 8025
 8026
 8027
 8028
 8029
 8030
 8031
 8032
 8033
 8034
 8035
 8036
 8037
 8038
 8039
 8040
 8041
 8042
 8043
 8044
 8045
 8046
 8047
 8048
 8049
 8050
 8051
 8052
 8053
 8054
 8055
 8056
 8057
 8058
 8059
 8060
 8061
 8062
 8063
 8064
 8065
 8066
 8067
 8068
 8069
 8070
 8071
 8072
 8073
 8074
 8075
 8076
 8077
 8078
 8079
 8080
 8081
 8082
 8083
 8084
 8085
 8086
 8087
 8088
 8089
 8090
 8091
 8092
 8093
 8094
 8095
 8096
 8097
 8098
 8099
 8100
 8101
 8102
 8103
 8104
 8105
 8106
 8107
 8108
 8109
 8110
 8111
 8112
 8113
 8114
 8115
 8116
 8117
 8118
 8119
 8120
 8121
 8122
 8123
 8124
 8125
 8126
 8127
 8128
 8129
 8130
 8131
 8132
 8133
 8134
 8135
 8136
 8137
 8138
 8139
 8140
 8141
 8142
 8143
 8144
 8145
 8146
 8147
 8148
 8149
 8150
 8151
 8152
 8153
 8154
 8155
 8156
 8157
 8158
 8159
 8160
 8161
 8162
 8163
 8164
 8165
 8166
 8167
 8168
 8169
 8170
 8171
 8172
 8173
 8174
 8175
 8176
 8177
 8178
 8179
 8180
 8181
 8182
 8183
 8184
 8185
 8186
 8187
 8188
 8189
 8190
 8191
 8192
 8193
 8194
 8195
 8196
 8197
 8198
 8199
 8200
 8201
 8202
 8203
 8204
 8205
 8206
 8207
 8208
 8209
 8210
 8211
 8212
 8213
 8214
 8215
 8216
 8217
 8218
 8219
 8220
 8221
 8222
 8223
 8224
 8225
 8226
 8227
 8228
 8229
 8230
 8231
 8232
 8233
 8234
 8235
 8236
 8237
 8238
 8239
 8240
 8241
 8242
 8243
 8244
 8245
 8246
 8247
 8248
 8249
 8250
 8251
 8252
 8253
 8254
 8255
 8256
 8257
 8258
 8259
 8260
 8261
 8262
 8263
 8264
 8265
 8266
 8267
 8268
 8269
 8270
 8271
 8272
 8273
 8274
 8275
 8276
 8277
 8278
 8279
 8280
 8281
 8282
 8283
 8284
 8285
 8286
 8287
 8288
 8289
 8290
 8291
 8292
 8293
 8294
 8295
 8296
 8297
 8298
 8299
 8300
 8301
 8302
 8303
 8304
 8305
 8306
 8307
 8308
 8309
 8310
 8311
 8312
 8313
 8314
 8315
 8316
 8317
 8318
 8319
 8320
 8321
 8322
 8323
 8324
 8325
 8326
 8327
 8328
 8329
 8330
 8331
 8332
 8333
 8334
 8335
 8336
 8337
 8338
 8339
 8340
 8341
 8342
 8343
 8344
 8345
 8346
 8347
 8348
 8349
 8350
 8351
 8352
 8353
 8354
 8355
 8356
 8357
 8358
 8359
 8360
 8361
 8362
 8363
 8364
 8365
 8366
 8367
 8368
 8369
 8370
 8371
 8372
 8373
 8374
 8375
 8376
 8377
 8378
 8379
 8380
 8381
 8382
 8383
 8384
 8385
 8386
 8387
 8388
 8389
 8390
 8391
 8392
 8393
 8394
 8395
 8396
 8397
 8398
 8399
 8400
 8401
 8402
 8403
 8404
 8405
 8406
 8407
 8408
 8409
 8410
 8411
 8412
 8413
 8414
 8415
 8416
 8417
 8418
 8419
 8420
 8421
 8422
 8423
 8424
 8425
 8426
 8427
 8428
 8429
 8430
 8431
 8432
 8433
 8434
 8435
 8436
 8437
 8438
 8439
 8440
 8441
 8442
 8443
 8444
 8445
 8446
 8447
 8448
 8449
 8450
 8451
 8452
 8453
 8454
 8455
 8456
 8457
 8458
 8459
 8460
 8461
 8462
 8463
 8464
 8465
 8466
 8467
 8468
 8469
 8470
 8471
 8472
 8473
 8474
 8475
 8476
 8477
 8478
 8479
 8480
 8481
 8482
 8483
 8484
 8485
 8486
 8487
 8488
 8489
 8490
 8491
 8492
 8493
 8494
 8495
 8496
 8497
 8498
 8499
 8500
 8501
 8502
 8503
 8504
 8505
 8506
 8507
 8508
 8509
 8510
 8511
 8512
 8513
 8514
 8515
 8516
 8517
 8518
 8519
 8520
 8521
 8522
 8523
 8524
 8525
 8526
 8527
 8528
 8529
 8530
 8531
 8532
 8533
 8534
 8535
 8536
 8537
 8538
 8539
 8540
 8541
 8542
 8543
 8544
 8545
 8546
 8547
 8548
 8549
 8550
 8551
 8552
 8553
 8554
 8555
 8556
 8557
 8558
 8559
 8560
 8561
 8562
 8563
 8564
 8565
 8566
 8567
 8568
 8569
 8570
 8571
 8572
 8573
 8574
 8575
 8576
 8577
 8578
 8579
 8580
 8581
 8582
 8583
 8584
 8585
 8586
 8587
 8588
 8589
 8590
 8591
 8592
 8593
 8594
 8595
 8596
 8597
 8598
 8599
 8600
 8601
 8602
 8603
 8604
 8605
 8606
 8607
 8608
 8609
 8610
 8611
 8612
 8613
 8614
 8615
 8616
 8617
 8618
 8619
 8620
 8621
 8622
 8623
 8624
 8625
 8626
 8627
 8628
 8629
 8630
 8631
 8632
 8633
 8634
 8635
 8636
 8637
 8638
 8639
 8640
 8641
 8642
 8643
 8644
 8645
 8646
 8647
 8648
 8649
 8650
 8651
 8652
 8653
 8654
 8655
 8656
 8657
 8658
 8659
 8660
 8661
 8662
 8663
 8664
 8665
 8666
 8667
 8668
 8669
 8670
 8671
 8672
 8673
 8674
 8675
 8676
 8677
 8678
 8679
 8680
 8681
 8682
 8683
 8684
 8685
 8686
 8687
 8688
 8689
 8690
 8691
 8692
 8693
 8694
 8695
 8696
 8697
 8698
 8699
 8700
 8701
 8702
 8703
 8704
 8705
 8706
 8707
 8708
 8709
 8710
 8711
 8712
 8713
 8714
 8715
 8716
 8717
 8718
 8719
 8720
 8721
 8722
 8723
 8724
 8725
 8726
 8727
 8728
 8729
 8730
 8731
 8732
 8733
 8734
 8735
 8736
 8737
 8738
 8739
 8740
 8741
 8742
 8743
 8744
 8745
 8746
 8747
 8748
 8749
 8750
 8751
 8752
 8753
 8754
 8755
 8756
 8757
 8758
 8759
 8760
 8761
 8762
 8763
 8764
 8765
 8766
 8767
 8768
 8769
 8770
 8771
 8772
 8773
 8774
 8775
 8776
 8777
 8778
 8779
 8780
 8781
 8782
 8783
 8784
 8785
 8786
 8787
 8788
 8789
 8790
 8791
 8792
 8793
 8794
 8795
 8796
 8797
 8798
 8799
 8800
 8801
 8802
 8803
 8804
 8805
 8806
 8807
 8808
 8809
 8810
 8811
 8812
 8813
 8814
 8815
 8816
 8817
 8818
 8819
 8820
 8821
 8822
 8823
 8824
 8825
 8826
 8827
 8828
 8829
 8830
 8831
 8832
 8833
 8834
 8835
 8836
 8837
 8838
 8839
 8840
 8841
 8842
 8843
 8844
 8845
 8846
 8847
 8848
 8849
 8850
 8851
 8852
 8853
 8854
 8855
 8856
 8857
 8858
 8859
 8860
 8861
 8862
 8863
 8864
 8865
 8866
 8867
 8868
 8869
 8870
 8871
 8872
 8873
 8874
 8875
 8876
 8877
 8878
 8879
 8880
 8881
 8882
 8883
 8884
 8885
 8886
 8887
 8888
 8889
 8890
 8891
 8892
 8893
 8894
 8895
 8896
 8897
 8898
 8899
 8900
 8901
 8902
 8903
 8904
 8905
 8906
 8907
 8908
 8909
 8910
 8911
 8912
 8913
 8914
 8915
 8916
 8917
 8918
 8919
 8920
 8921
 8922
 8923
 8924
 8925
 8926
 8927
 8928
 8929
 8930
 8931
 8932
 8933
 8934
 8935
 8936
 8937
 8938
 8939
 8940
 8941
 8942
 8943
 8944
 8945
 8946
 8947
 8948
 8949
 8950
 8951
 8952
 8953
 8954
 8955
 8956
 8957
 8958
 8959
 8960
 8961
 8962
 8963
 8964
 8965
 8966
 8967
 8968
 8969
 8970
 8971
 8972
 8973
 8974
 8975
 8976
 8977
 8978
 8979
 8980
 8981
 8982
 8983
 8984
 8985
 8986
 8987
 8988
 8989
 8990
 8991
 8992
 8993
 8994
 8995
 8996
 8997
 8998
 8999
 9000
 9001
 9002
 9003
 9004
 9005
 9006
 9007
 9008
 9009
 9010
 9011
 9012
 9013
 9014
 9015
 9016
 9017
 9018
 9019
 9020
 9021
 9022
 9023
 9024
 9025
 9026
 9027
 9028
 9029
 9030
 9031
 9032
 9033
 9034
 9035
 9036
 9037
 9038
 9039
 9040
 9041
 9042
 9043
 9044
 9045
 9046
 9047
 9048
 9049
 9050
 9051
 9052
 9053
 9054
 9055
 9056
 9057
 9058
 9059
 9060
 9061
 9062
 9063
 9064
 9065
 9066
 9067
 9068
 9069
 9070
 9071
 9072
 9073
 9074
 9075
 9076
 9077
 9078
 9079
 9080
 9081
 9082
 9083
 9084
 9085
 9086
 9087
 9088
 9089
 9090
 9091
 9092
 9093
 9094
 9095
 9096
 9097
 9098
 9099
 9100
 9101
 9102
 9103
 9104
 9105
 9106
 9107
 9108
 9109
 9110
 9111
 9112
 9113
 9114
 9115
 9116
 9117
 9118
 9119
 9120
 9121
 9122
 9123
 9124
 9125
 9126
 9127
 9128
 9129
 9130
 9131
 9132
 9133
 9134
 9135
 9136
 9137
 9138
 9139
 9140
 9141
 9142
 9143
 9144
 9145
 9146
 9147
 9148
 9149
 9150
 9151
 9152
 9153
 9154
 9155
 9156
 9157
 9158
 9159
 9160
 9161
 9162
 9163
 9164
 9165
 9166
 9167
 9168
 9169
 9170
 9171
 9172
 9173
 9174
 9175
 9176
 9177
 9178
 9179
 9180
 9181
 9182
 9183
 9184
 9185
 9186
 9187
 9188
 9189
 9190
 9191
 9192
 9193
 9194
 9195
 9196
 9197
 9198
 9199
 9200
 9201
 9202
 9203
 9204
 9205
 9206
 9207
 9208
 9209
 9210
 9211
 9212
 9213
 9214
 9215
 9216
 9217
 9218
 9219
 9220
 9221
 9222
 9223
 9224
 9225
 9226
 9227
 9228
 9229
 9230
 9231
 9232
 9233
 9234
 9235
 9236
 9237
 9238
 9239
 9240
 9241
 9242
 9243
 9244
 9245
 9246
 9247
 9248
 9249
 9250
 9251
 9252
 9253
 9254
 9255
 9256
 9257
 9258
 9259
 9260
 9261
 9262
 9263
 9264
 9265
 9266
 9267
 9268
 9269
 9270
 9271
 9272
 9273
 9274
 9275
 9276
 9277
 9278
 9279
 9280
 9281
 9282
 9283
 9284
 9285
 9286
 9287
 9288
 9289
 9290
 9291
 9292
 9293
 9294
 9295
 9296
 9297
 9298
 9299
 9300
 9301
 9302
 9303
 9304
 9305
 9306
 9307
 9308
 9309
 9310
 9311
 9312
 9313
 9314
 9315
 9316
 9317
 9318
 9319
 9320
 9321
 9322
 9323
 9324
 9325
 9326
 9327
 9328
 9329
 9330
 9331
 9332
 9333
 9334
 9335
 9336
 9337
 9338
 9339
 9340
 9341
 9342
 9343
 9344
 9345
 9346
 9347
 9348
 9349
 9350
 9351
 9352
 9353
 9354
 9355
 9356
 9357
 9358
 9359
 9360
 9361
 9362
 9363
 9364
 9365
 9366
 9367
 9368
 9369
 9370
 9371
 9372
 9373
 9374
 9375
 9376
 9377
 9378
 9379
 9380
 9381
 9382
 9383
 9384
 9385
 9386
 9387
 9388
 9389
 9390
 9391
 9392
 9393
 9394
 9395
 9396
 9397
 9398
 9399
 9400
 9401
 9402
 9403
 9404
 9405
 9406
 9407
 9408
 9409
 9410
 9411
 9412
 9413
 9414
 9415
 9416
 9417
 9418
 9419
 9420
 9421
 9422
 9423
 9424
 9425
 9426
 9427
 9428
 9429
 9430
 9431
 9432
 9433
 9434
 9435
 9436
 9437
 9438
 9439
 9440
 9441
 9442
 9443
 9444
 9445
 9446
 9447
 9448
 9449
 9450
 9451
 9452
 9453
 9454
 9455
 9456
 9457
 9458
 9459
 9460
 9461
 9462
 9463
 9464
 9465
 9466
 9467
 9468
 9469
 9470
 9471
 9472
 9473
 9474
 9475
 9476
 9477
 9478
 9479
 9480
 9481
 9482
 9483
 9484
 9485
 9486
 9487
 9488
 9489
 9490
 9491
 9492
 9493
 9494
 9495
 9496
 9497
 9498
 9499
 9500
 9501
 9502
 9503
 9504
 9505
 9506
 9507
 9508
 9509
 9510
 9511
 9512
 9513
 9514
 9515
 9516
 9517
 9518
 9519
 9520
 9521
 9522
 9523
 9524
 9525
 9526
 9527
 9528
 9529
 9530
 9531
 9532
 9533
 9534
 9535
 9536
 9537
 9538
 9539
 9540
 9541
 9542
 9543
 9544
 9545
 9546
 9547
 9548
 9549
 9550
 9551
 9552
 9553
 9554
 9555
 9556
 9557
 9558
 9559
 9560
 9561
 9562
 9563
 9564
 9565
 9566
 9567
 9568
 9569
 9570
 9571
 9572
 9573
 9574
 9575
 9576
 9577
 9578
 9579
 9580
 9581
 9582
 9583
 9584
 9585
 9586
 9587
 9588
 9589
 9590
 9591
 9592
 9593
 9594
 9595
 9596
 9597
 9598
 9599
 9600
 9601
 9602
 9603
 9604
 9605
 9606
 9607
 9608
 9609
 9610
 9611
 9612
 9613
 9614
 9615
 9616
 9617
 9618
 9619
 9620
 9621
 9622
 9623
 9624
 9625
 9626
 9627
 9628
 9629
 9630
 9631
 9632
 9633
 9634
 9635
 9636
 9637
 9638
 9639
 9640
 9641
 9642
 9643
 9644
 9645
 9646
 9647
 9648
 9649
 9650
 9651
 9652
 9653
 9654
 9655
 9656
 9657
 9658
 9659
 9660
 9661
 9662
 9663
 9664
 9665
 9666
 9667
 9668
 9669
 9670
 9671
 9672
 9673
 9674
 9675
 9676
 9677
 9678
 9679
 9680
 9681
 9682
 9683
 9684
 9685
 9686
 9687
 9688
 9689
 9690
 9691
 9692
 9693
 9694
 9695
 9696
 9697
 9698
 9699
 9700
 9701
 9702
 9703
 9704
 9705
 9706
 9707
 9708
 9709
 9710
 9711
 9712
 9713
 9714
 9715
 9716
 9717
 9718
 9719
 9720
 9721
 9722
 9723
 9724
 9725
 9726
 9727
 9728
 9729
 9730
 9731
 9732
 9733
 9734
 9735
 9736
 9737
 9738
 9739
 9740
 9741
 9742
 9743
 9744
 9745
 9746
 9747
 9748
 9749
 9750
 9751
 9752
 9753
 9754
 9755
 9756
 9757
 9758
 9759
 9760
 9761
 9762
 9763
 9764
 9765
 9766
 9767
 9768
 9769
 9770
 9771
 9772
 9773
 9774
 9775
 9776
 9777
 9778
 9779
 9780
 9781
 9782
 9783
 9784
 9785
 9786
 9787
 9788
 9789
 9790
 9791
 9792
 9793
 9794
 9795
 9796
 9797
 9798
 9799
 9800
 9801
 9802
 9803
 9804
 9805
 9806
 9807
 9808
 9809
 9810
 9811
 9812
 9813
 9814
 9815
 9816
 9817
 9818
 9819
 9820
 9821
 9822
 9823
 9824
 9825
 9826
 9827
 9828
 9829
 9830
 9831
 9832
 9833
 9834
 9835
 9836
 9837
 9838
 9839
 9840
 9841
 9842
 9843
 9844
 9845
 9846
 9847
 9848
 9849
 9850
 9851
 9852
 9853
 9854
 9855
 9856
 9857
 9858
 9859
 9860
 9861
 9862
 9863
 9864
 9865
 9866
 9867
 9868
 9869
 9870
 9871
 9872
 9873
 9874
 9875
 9876
 9877
 9878
 9879
 9880
 9881
 9882
 9883
 9884
 9885
 9886
 9887
 9888
 9889
 9890
 9891
 9892
 9893
 9894
 9895
 9896
 9897
 9898
 9899
 9900
 9901
 9902
 9903
 9904
 9905
 9906
 9907
 9908
 9909
 9910
 9911
 9912
 9913
 9914
 9915
 9916
 9917
 9918
 9919
 9920
 9921
 9922
 9923
 9924
 9925
 9926
 9927
 9928
 9929
 9930
 9931
 9932
 9933
 9934
 9935
 9936
 9937
 9938
 9939
 9940
 9941
 9942
 9943
 9944
 9945
 9946
 9947
 9948
 9949
 9950
 9951
 9952
 9953
 9954
 9955
 9956
 9957
 9958
 9959
 9960
 9961
 9962
 9963
 9964
 9965
 9966
 9967
 9968
 9969
 9970
 9971
 9972
 9973
 9974
 9975
 9976
 9977
 9978
 9979
 9980
 9981
 9982
 9983
 9984
 9985
 9986
 9987
 9988
 9989
 9990
 9991
 9992
 9993
 9994
 9995
 9996
 9997
 9998
 9999
10000
10001
10002
10003
10004
10005
10006
10007
10008
10009
10010
10011
10012
10013
10014
10015
10016
10017
10018
10019
10020
10021
10022
10023
10024
10025
10026
10027
10028
10029
10030
10031
10032
10033
10034
10035
10036
10037
10038
10039
10040
10041
10042
10043
10044
10045
10046
10047
10048
10049
10050
10051
10052
10053
10054
10055
10056
10057
10058
10059
10060
10061
10062
10063
10064
10065
10066
10067
10068
10069
10070
10071
10072
10073
10074
10075
10076
10077
10078
10079
10080
10081
10082
10083
10084
10085
10086
10087
10088
10089
10090
10091
10092
10093
10094
10095
10096
10097
10098
10099
10100
10101
10102
10103
10104
10105
10106
10107
10108
10109
10110
10111
10112
10113
10114
10115
10116
10117
10118
10119
10120
10121
10122
10123
10124
10125
10126
10127
10128
10129
10130
10131
10132
10133
10134
10135
10136
10137
10138
10139
10140
10141
10142
10143
10144
10145
10146
10147
10148
10149
10150
10151
10152
10153
10154
10155
10156
10157
10158
10159
10160
10161
10162
10163
10164
10165
10166
10167
10168
10169
10170
10171
10172
10173
10174
10175
10176
10177
10178
10179
10180
10181
10182
10183
10184
10185
10186
10187
10188
10189
10190
10191
10192
10193
10194
10195
10196
10197
10198
10199
10200
10201
10202
10203
10204
10205
10206
10207
10208
10209
10210
10211
10212
10213
10214
10215
10216
10217
10218
10219
10220
10221
10222
10223
10224
10225
10226
10227
10228
10229
10230
10231
10232
10233
10234
10235
10236
10237
10238
10239
10240
10241
10242
10243
10244
10245
10246
10247
10248
10249
10250
10251
10252
10253
10254
10255
10256
10257
10258
10259
10260
10261
10262
10263
10264
10265
10266
10267
10268
10269
10270
10271
10272
10273
10274
10275
10276
10277
10278
10279
10280
10281
10282
10283
10284
10285
10286
10287
10288
10289
10290
10291
10292
10293
10294
10295
10296
10297
10298
10299
10300
10301
10302
10303
10304
10305
10306
10307
10308
10309
10310
10311
10312
10313
10314
10315
10316
10317
10318
10319
10320
10321
10322
10323
10324
10325
10326
10327
10328
10329
10330
10331
10332
10333
10334
10335
10336
10337
10338
10339
10340
10341
10342
10343
10344
10345
10346
10347
10348
10349
10350
10351
10352
10353
10354
10355
10356
10357
10358
10359
10360
10361
10362
10363
10364
10365
10366
10367
10368
10369
10370
10371
10372
10373
10374
10375
10376
10377
10378
10379
10380
10381
10382
10383
10384
10385
10386
10387
10388
10389
10390
10391
10392
10393
10394
10395
10396
10397
10398
10399
10400
10401
10402
10403
10404
10405
10406
10407
10408
10409
10410
10411
10412
10413
10414
10415
10416
10417
10418
10419
10420
10421
10422
10423
10424
10425
10426
10427
10428
10429
10430
10431
10432
10433
10434
10435
10436
10437
10438
10439
10440
10441
10442
10443
10444
10445
10446
10447
10448
10449
10450
10451
10452
10453
10454
10455
10456
10457
10458
10459
10460
10461
10462
10463
10464
10465
10466
10467
10468
10469
10470
10471
10472
10473
10474
10475
10476
10477
10478
10479
10480
10481
10482
10483
10484
10485
10486
10487
10488
10489
10490
10491
10492
10493
10494
10495
10496
10497
10498
10499
10500
10501
10502
10503
10504
10505
10506
10507
10508
10509
10510
10511
10512
10513
10514
10515
10516
10517
10518
10519
10520
10521
10522
10523
10524
10525
10526
10527
10528
10529
10530
10531
10532
10533
10534
10535
10536
10537
10538
10539
10540
10541
10542
10543
10544
10545
10546
10547
10548
10549
10550
10551
10552
10553
10554
10555
10556
10557
10558
10559
10560
10561
10562
10563
10564
10565
10566
10567
10568
10569
10570
10571
10572
10573
10574
10575
10576
10577
10578
10579
10580
10581
10582
10583
10584
10585
10586
10587
10588
10589
10590
10591
10592
10593
10594
10595
10596
10597
10598
10599
10600
10601
10602
10603
10604
10605
10606
10607
10608
10609
10610
10611
10612
10613
10614
10615
10616
10617
10618
10619
10620
10621
10622
10623
10624
10625
10626
10627
10628
10629
10630
10631
10632
10633
10634
10635
10636
10637
10638
10639
10640
10641
10642
10643
10644
10645
10646
10647
10648
10649
10650
10651
10652
10653
10654
10655
10656
10657
10658
10659
10660
10661
10662
10663
10664
10665
10666
10667
10668
10669
10670
10671
10672
10673
10674
10675
10676
10677
10678
10679
10680
10681
10682
10683
10684
10685
10686
10687
10688
10689
10690
10691
10692
10693
10694
10695
10696
10697
10698
10699
10700
10701
10702
10703
10704
10705
10706
10707
10708
10709
10710
10711
10712
10713
10714
10715
10716
10717
10718
10719
10720
10721
10722
10723
10724
10725
10726
10727
10728
10729
10730
10731
10732
10733
10734
10735
10736
10737
10738
10739
10740
10741
10742
10743
10744
10745
10746
10747
10748
10749
10750
10751
10752
10753
10754
10755
10756
10757
10758
10759
10760
10761
10762
10763
10764
10765
10766
10767
10768
10769
10770
10771
10772
10773
10774
10775
10776
10777
10778
10779
10780
10781
10782
10783
10784
10785
10786
10787
10788
10789
10790
10791
10792
10793
10794
10795
10796
10797
10798
10799
10800
10801
10802
10803
10804
10805
class FlowAgent:
    """Production-ready agent system built on PocketFlow """
    def __init__(
        self,
        amd: AgentModelData,
        world_model: dict[str, Any] = None,
        verbose: bool = False,
        enable_pause_resume: bool = True,
        checkpoint_interval: int = 300,  # seconds between automatic checkpoints (5 minutes)
        max_parallel_tasks: int = 3,
        progress_callback: callable = None,
        **kwargs
    ):
        """Initialize the FlowAgent and wire up its core subsystems.

        Args:
            amd: Agent model data (name, LLM model ids, persona, budget manager).
            world_model: Initial world-model mapping; a fresh dict when None.
            verbose: Enables verbose prompt printing (together with AGENT_VERBOSE).
            enable_pause_resume: Enables periodic checkpointing of agent state.
            checkpoint_interval: Seconds between automatic checkpoints.
            max_parallel_tasks: Parallelism for the task flow and the thread pool.
            progress_callback: Optional callable handed to the ProgressTracker.
            **kwargs: Ignored; accepted for forward compatibility.
        """
        self.amd = amd
        self.world_model = world_model or {}
        self.verbose = verbose
        self.enable_pause_resume = enable_pause_resume
        self.checkpoint_interval = checkpoint_interval
        self.max_parallel_tasks = max_parallel_tasks
        self.progress_tracker = ProgressTracker(progress_callback, agent_name=amd.name)

        # Core shared state dict passed through all flows
        self.shared = {
            "world_model": self.world_model,
            "tasks": {},
            "task_plans": {},
            "system_status": "idle",
            "session_data": {},
            "performance_metrics": {},
            "conversation_history": [],
            "available_tools": [],
            "progress_tracker": self.progress_tracker
        }
        # Context/variable managers must exist before the flows below use them
        self.context_manager = UnifiedContextManager(self)
        self.variable_manager = VariableManager(self.shared["world_model"], self.shared)
        self.context_manager.variable_manager = self.variable_manager# share one variable manager instance

        self.shared["context_manager"] = self.context_manager
        self.shared["variable_manager"] = self.variable_manager
        # Flows
        self.task_flow = TaskManagementFlow(max_parallel_tasks=self.max_parallel_tasks)
        self.response_flow = ResponseGenerationFlow()

        # Give the executor node a back-reference to this agent, if it has one
        if hasattr(self.task_flow, 'executor_node'):
            self.task_flow.executor_node.agent_instance = self

        # Agent run state
        self.is_running = False
        self.is_paused = False
        self.last_checkpoint = None
        self.checkpoint_data = {}

        # Threading
        self.executor = ThreadPoolExecutor(max_workers=max_parallel_tasks)
        self._shutdown_event = threading.Event()

        # Server components (created lazily elsewhere)
        self.a2a_server: A2AServer = None
        self.mcp_server: FastMCP = None

        # Enhanced tool registry and capability caches
        self._tool_registry = {}
        self._tool_capabilities = {}
        self._tool_analysis_cache = {}

        self.active_session = None
        # Path of the persisted tool-analysis file
        self.tool_analysis_file = self._get_tool_analysis_path()

        # Load previously analyzed tool capabilities from disk
        self._tool_capabilities.update(self._load_tool_analysis())
        if self.amd.budget_manager:
            self.amd.budget_manager.load_data()

        self._setup_variable_scopes()

        rprint(f"FlowAgent initialized: {amd.name}")

    @property
    def progress_callback(self):
        """Callback invoked by the ProgressTracker for progress events."""
        return self.progress_tracker.progress_callback

    @progress_callback.setter
    def progress_callback(self, value):
        # Delegate storage to the tracker so there is a single source of truth
        self.progress_tracker.progress_callback = value

    def set_progress_callback(self, progress_callback: callable = None):
        """Explicit setter kept for API symmetry; delegates to the property."""
        self.progress_callback = progress_callback

    async def a_run_llm_completion(self, node_name="FlowAgentLLMCall",task_id="unknown",model_preference="fast", with_context=True, **kwargs) -> str:
        """Run a single LLM completion through litellm with progress tracking.

        Args:
            node_name: Logical node name reported in progress events.
            task_id: Task identifier reported in progress events.
            model_preference: "fast" selects amd.fast_llm_model, anything else
                amd.complex_llm_model (only when no explicit model is given).
            with_context: Prepend a system message with persona + session context.
            **kwargs: Forwarded to litellm.acompletion (model, messages, ...).

        Returns:
            The assistant message content of the first choice.

        Raises:
            Exception: Re-raises any litellm error after emitting a FAILED event.
        """
        if "model" not in kwargs:
            kwargs["model"] = self.amd.fast_llm_model if model_preference == "fast" else self.amd.complex_llm_model

        llm_start = time.perf_counter()

        if self.progress_tracker:
            # BUGFIX: guard against an explicitly empty messages list — the
            # previous kwargs.get("messages", [{}])[-1] raised IndexError on [].
            last_message = (kwargs.get("messages") or [{}])[-1]
            await self.progress_tracker.emit_event(ProgressEvent(
                event_type="llm_call",
                node_name=node_name,
                session_id=self.active_session,
                task_id=task_id,
                status=NodeStatus.RUNNING,
                llm_model=kwargs["model"],
                llm_temperature=kwargs.get("temperature", 0.7),
                llm_input=last_message.get("content", ""),  # capture the prompt directly
                metadata={
                    # BUGFIX: previously read kwargs.get("model_preference"),
                    # which is never present in kwargs (it is a named parameter),
                    # so events always reported the default "fast".
                    "model_preference": model_preference
                }
            ))

        # Auto api-key resolution for common providers
        # (openrouter, openai, anthropic, google, azure, huggingface, replicate, togetherai, groq)
        if "api_key" not in kwargs:
            # litellm model-prefix -> api key mapping
            prefix = kwargs['model'].split("/")[0]
            model_prefix_map = {
                "openrouter": os.getenv("OPENROUTER_API_KEY"),
                "openai": os.getenv("OPENAI_API_KEY"),
                "anthropic": os.getenv("ANTHROPIC_API_KEY"),
                "google": os.getenv("GOOGLE_API_KEY"),
                "azure": os.getenv("AZURE_API_KEY"),
                "huggingface": os.getenv("HUGGINGFACE_API_KEY"),
                "replicate": os.getenv("REPLICATE_API_KEY"),
                "togetherai": os.getenv("TOGETHERAI_API_KEY"),
                "groq": os.getenv("GROQ_API_KEY"),
            }
            api_key = model_prefix_map.get(prefix)
            # BUGFIX: only set the key when one was resolved; passing an explicit
            # api_key=None would shadow litellm's own provider/env resolution.
            if api_key:
                kwargs["api_key"] = api_key

        if self.active_session and with_context:
            # Prepend persona + session context as the first system message
            context_ = await self.get_context(self.active_session)
            kwargs["messages"] = [{"role": "system", "content": self.amd.get_system_message_with_persona()+'\n\nContext:\n\n'+context_}] + kwargs.get("messages", [])

        # Build fallback list from FALLBACKS_MODELS(_PREM) and the matching _KEYS env vars

        if 'fallbacks' not in kwargs:
            fallbacks_dict_list = []
            fallbacks = os.getenv("FALLBACKS_MODELS", '').split(',') if model_preference == "fast" else os.getenv(
                "FALLBACKS_MODELS_PREM", '').split(',')
            fallbacks_keys = os.getenv("FALLBACKS_MODELS_KEYS", '').split(
                ',') if model_preference == "fast" else os.getenv(
                "FALLBACKS_MODELS_KEYS_PREM", '').split(',')
            for model, key in zip(fallbacks, fallbacks_keys):
                # BUGFIX: ''.split(',') yields [''] for unset env vars, which
                # previously produced a bogus {"model": "", ...} fallback entry.
                if model.strip():
                    fallbacks_dict_list.append({"model": model.strip(), "api_key": key})
            kwargs['fallbacks'] = fallbacks_dict_list
        try:

            response = await litellm.acompletion(**kwargs)

            llm_duration = time.perf_counter() - llm_start
            result = response.choices[0].message.content

            if AGENT_VERBOSE and self.verbose:
                kwargs["messages"] += [{"role": "assistant", "content": result}]
                print_prompt(kwargs)

            # Extract token usage and cost
            usage = response.usage
            input_tokens = usage.prompt_tokens if usage else 0
            output_tokens = usage.completion_tokens if usage else 0
            total_tokens = usage.total_tokens if usage else 0

            call_cost = self.progress_tracker.calculate_llm_cost(kwargs["model"], input_tokens,
                                                            output_tokens, response) if self.progress_tracker else 0.0

            if self.progress_tracker:
                await self.progress_tracker.emit_event(ProgressEvent(
                    event_type="llm_call",
                    node_name=node_name,
                    task_id=task_id,
                    session_id=self.active_session,
                    status=NodeStatus.COMPLETED,
                    success=True,
                    duration=llm_duration,
                    llm_model=kwargs["model"],
                    llm_prompt_tokens=input_tokens,
                    llm_completion_tokens=output_tokens,
                    llm_total_tokens=total_tokens,
                    llm_cost=call_cost,
                    llm_temperature=kwargs.get("temperature", 0.7),
                    llm_output=result,
                    llm_input="",
                ))

            return result
        except Exception as e:
            llm_duration = time.perf_counter() - llm_start
            import traceback
            print(traceback.format_exc())

            if self.progress_tracker:
                await self.progress_tracker.emit_event(ProgressEvent(
                    event_type="llm_call",  # keep the event type consistent with the start event
                    node_name=node_name,
                    task_id=task_id,
                    session_id=self.active_session,
                    status=NodeStatus.FAILED,
                    success=False,
                    duration=llm_duration,
                    llm_model=kwargs["model"],
                    error_details={
                        "message": str(e),
                        "type": type(e).__name__
                    }
                ))

            raise

    async def a_run(
        self,
        query: str,
        session_id: str = "default",
        user_id: str = None,
        stream_callback: Callable = None,
        remember: bool = True,
        **kwargs
    ) -> str:
        """Main entry point for agent execution using the UnifiedContextManager.

        Initializes the session, records the user message (when *remember* is
        True), runs the orchestration flow, stores the assistant response, and
        emits start/complete/error progress events.

        Args:
            query: The user query to process.
            session_id: Conversation session identifier.
            user_id: Optional user identifier stored in context metadata.
            stream_callback: Optional callback for streamed output chunks.
            remember: Persist the user/assistant turns in the chat session.
            **kwargs: Currently unused; accepted for forward compatibility.

        Returns:
            The agent's response string, or an error message on failure
            (exceptions are caught and reported, not re-raised).
        """

        execution_start = self.progress_tracker.start_timer("total_execution")
        self.active_session = session_id
        result = None
        await self.progress_tracker.emit_event(ProgressEvent(
            event_type="execution_start",
            timestamp=time.time(),
            status=NodeStatus.RUNNING,
            node_name="FlowAgent",
            session_id=session_id,
            metadata={"query": query, "user_id": user_id}
        ))

        try:
            # Initialize or fetch the session via the UnifiedContextManager
            await self.initialize_session_context(session_id, max_history=200)

            # Store the user message immediately in the ChatSession when remember=True
            if remember:
                await self.context_manager.add_interaction(
                    session_id,
                    'user',
                    query,
                    metadata={"user_id": user_id}
                )

            # Set user context variables
            timestamp = datetime.now()
            self.variable_manager.register_scope('user', {
                'id': user_id,
                'session': session_id,
                'query': query,
                'timestamp': timestamp.isoformat()
            })

            # Update system variables
            self.variable_manager.set('system_context.timestamp', {'isoformat': timestamp.isoformat()})
            self.variable_manager.set('system_context.current_session', session_id)
            self.variable_manager.set('system_context.current_user', user_id)
            self.variable_manager.set('system_context.last_query', query)

            # Initialize with tool awareness
            await self.initialize_context_awareness()

            # Prepare execution context — the context manager is the primary
            # context source, so only the minimum is duplicated into shared
            self.shared.update({
                "current_query": query,
                "session_id": session_id,
                "user_id": user_id,
                "stream_callback": stream_callback,
                "remember": remember,
                # CENTRAL: the context manager is the primary context source
                "context_manager": self.context_manager,
                "variable_manager": self.variable_manager
            })

            # Set LLM models in shared context
            self.shared['fast_llm_model'] = self.amd.fast_llm_model
            self.shared['complex_llm_model'] = self.amd.complex_llm_model
            self.shared['persona_config'] = self.amd.persona
            self.shared['use_fast_response'] = self.amd.use_fast_response

            # Set system status
            self.shared["system_status"] = "running"
            self.is_running = True

            # Execute main orchestration flow
            result = await self._orchestrate_execution()

            # Store the assistant response in the ChatSession when remember=True
            if remember:
                await self.context_manager.add_interaction(
                    session_id,
                    'assistant',
                    result,
                    # NOTE(review): assumes start_timer returned a time.time()-style
                    # timestamp — confirm against ProgressTracker.start_timer
                    metadata={"user_id": user_id, "execution_duration": time.time() - execution_start}
                )

            total_duration = self.progress_tracker.end_timer("total_execution")

            await self.progress_tracker.emit_event(ProgressEvent(
                event_type="execution_complete",
                timestamp=time.time(),
                node_name="FlowAgent",
                status=NodeStatus.COMPLETED,
                node_duration=total_duration,
                session_id=session_id,
                metadata={
                    "result_length": len(result),
                    "summary": self.progress_tracker.get_summary(),
                    "remembered": remember
                }
            ))

            # Checkpoint if needed
            if self.enable_pause_resume:
                await self._maybe_checkpoint()

            return result

        except Exception as e:
            eprint(f"Agent execution failed: {e}", exc_info=True)
            error_response = f"I encountered an error: {str(e)}"
            result = error_response
            import traceback
            print(traceback.format_exc())

            # Store the error in the ChatSession when remember=True
            if remember:
                await self.context_manager.add_interaction(
                    session_id,
                    'assistant',
                    error_response,
                    metadata={
                        "user_id": user_id,
                        "error": True,
                        "error_type": type(e).__name__
                    }
                )

            total_duration = self.progress_tracker.end_timer("total_execution")

            await self.progress_tracker.emit_event(ProgressEvent(
                event_type="error",
                timestamp=time.time(),
                node_name="FlowAgent",
                status=NodeStatus.FAILED,
                node_duration=total_duration,
                session_id=session_id,
                metadata={"error": str(e), "error_type": type(e).__name__}
            ))

            return error_response

        finally:
            # Always reset run state, even after an error
            self.shared["system_status"] = "idle"
            self.is_running = False
            self.active_session = None
    def set_response_format(
        self,
        response_format: str,
        text_length: str,
        custom_instructions: str = "",
        quality_threshold: float = 0.7
    ):
        """Configure the agent's response format and target length at runtime.

        Args:
            response_format: Must be a valid ResponseFormat enum value.
            text_length: Must be a valid TextLength enum value.
            custom_instructions: Extra free-form formatting instructions.
            quality_threshold: Minimum acceptable quality score.

        Raises:
            ValueError: If format or length is not a known enum value.
        """
        # Validate both inputs up front; enum construction raises ValueError
        # for unknown values.
        try:
            fmt = ResponseFormat(response_format)
            length = TextLength(text_length)
        except ValueError:
            available_formats = [f.value for f in ResponseFormat]
            available_lengths = [l.value for l in TextLength]
            raise ValueError(
                f"Invalid format or length. "
                f"Available formats: {available_formats}. "
                f"Available lengths: {available_lengths}"
            )

        # Ensure a persona exists before attaching format configuration
        if not self.amd.persona:
            self.amd.persona = PersonaConfig(name="Assistant")

        # Attach the new format configuration to the persona
        format_config = FormatConfig(
            response_format=fmt,
            text_length=length,
            custom_instructions=custom_instructions,
            quality_threshold=quality_threshold
        )
        self.amd.persona.format_config = format_config

        # Refresh personality traits to match the new format/length
        self._update_persona_with_format(response_format, text_length)

        # Mirror into shared state for the flows
        self.shared["persona_config"] = self.amd.persona
        self.shared["format_config"] = format_config

        rprint(f"Response format set: {response_format}, length: {text_length}")

    def _update_persona_with_format(self, response_format: str, text_length: str):
        """Aktualisiere Persona-Traits basierend auf Format"""

        # Format-spezifische Traits
        format_traits = {
            "with-tables": ["structured", "data-oriented", "analytical"],
            "with-bullet-points": ["organized", "clear", "systematic"],
            "with-lists": ["methodical", "sequential", "thorough"],
            "md-text": ["technical", "formatted", "detailed"],
            "yaml-text": ["structured", "machine-readable", "precise"],
            "json-text": ["technical", "API-focused", "structured"],
            "text-only": ["conversational", "natural", "flowing"],
            "pseudo-code": ["logical", "algorithmic", "step-by-step"],
            "code-structure": ["technical", "systematic", "hierarchical"]
        }

        # Längen-spezifische Traits
        length_traits = {
            "mini-chat": ["concise", "quick", "to-the-point"],
            "chat-conversation": ["conversational", "friendly", "balanced"],
            "table-conversation": ["structured", "comparative", "organized"],
            "detailed-indepth": ["thorough", "comprehensive", "analytical"],
            "phd-level": ["academic", "scholarly", "authoritative"]
        }

        # Kombiniere Traits
        current_traits = set(self.amd.persona.personality_traits)

        # Entferne alte Format-Traits
        old_format_traits = set()
        for traits in format_traits.values():
            old_format_traits.update(traits)
        for traits in length_traits.values():
            old_format_traits.update(traits)

        current_traits -= old_format_traits

        # Füge neue Traits hinzu
        new_traits = format_traits.get(response_format, [])
        new_traits.extend(length_traits.get(text_length, []))

        current_traits.update(new_traits)
        self.amd.persona.personality_traits = list(current_traits)

    def get_available_formats(self) -> dict[str, list[str]]:
        """Return all selectable formats/lengths plus their instruction text."""
        format_values = [fmt.value for fmt in ResponseFormat]
        length_values = [length.value for length in TextLength]

        # Per-option instruction strings come from a throwaway FormatConfig
        format_descriptions = {
            fmt.value: FormatConfig(response_format=fmt).get_format_instructions()
            for fmt in ResponseFormat
        }
        length_descriptions = {
            length.value: FormatConfig(text_length=length).get_length_instructions()
            for length in TextLength
        }

        return {
            "formats": format_values,
            "lengths": length_values,
            "format_descriptions": format_descriptions,
            "length_descriptions": length_descriptions
        }

    async def a_run_with_format(
        self,
        query: str,
        response_format: str = "frei-text",
        text_length: str = "chat-conversation",
        custom_instructions: str = "",
        **kwargs
    ) -> str:
        """Run the agent once with a temporary response format.

        The persona is snapshotted before the format is applied and restored
        afterwards, so the call leaves agent configuration unchanged.

        Args:
            query: User query to execute.
            response_format: A ResponseFormat enum value.
                NOTE(review): the default "frei-text" does not match any key in
                _update_persona_with_format's trait table ("text-only", ...) —
                verify it is a valid ResponseFormat value.
            text_length: A TextLength enum value.
            custom_instructions: Extra formatting instructions.
            **kwargs: Forwarded to a_run (session_id, user_id, ...).

        Returns:
            The agent's response string.
        """
        import copy

        # BUGFIX: set_response_format mutates the existing persona object in
        # place (format_config, personality_traits). Holding a plain reference
        # therefore restored nothing; deep-copy to take a real snapshot.
        original_persona = copy.deepcopy(self.amd.persona)

        try:
            self.set_response_format(response_format, text_length, custom_instructions)
            response = await self.a_run(query, **kwargs)
            return response
        finally:
            # Restore the pre-call persona (and mirror it into shared state)
            self.amd.persona = original_persona
            self.shared["persona_config"] = original_persona

    def get_format_quality_report(self) -> dict[str, Any]:
        """Erhalte detaillierten Format-Qualitätsbericht"""
        quality_assessment = self.shared.get("quality_assessment", {})

        if not quality_assessment:
            return {"status": "no_assessment", "message": "No recent quality assessment available"}

        quality_details = quality_assessment.get("quality_details", {})

        return {
            "overall_score": quality_details.get("total_score", 0.0),
            "format_adherence": quality_details.get("format_adherence", 0.0),
            "length_adherence": quality_details.get("length_adherence", 0.0),
            "content_quality": quality_details.get("base_quality", 0.0),
            "llm_assessment": quality_details.get("llm_assessment", 0.0),
            "suggestions": quality_assessment.get("suggestions", []),
            "assessment": quality_assessment.get("quality_assessment", "unknown"),
            "format_config_active": quality_details.get("format_config_used", False)
        }

    def get_variable_documentation(self) -> str:
        """Get comprehensive variable system documentation"""
        docs = []
        docs.append("# Variable System Documentation\n")

        # Available scopes
        docs.append("## Available Scopes:")
        scope_info = self.variable_manager.get_scope_info()
        for scope_name, info in scope_info.items():
            docs.append(f"- `{scope_name}`: {info['type']} with {info.get('keys', 'N/A')} keys")

        docs.append("\n## Syntax Options:")
        docs.append("- `{{ variable.path }}` - Full path resolution")
        docs.append("- `{variable}` - Simple variable (no dots)")
        docs.append("- `$variable` - Shell-style variable")

        docs.append("\n## Example Usage:")
        docs.append("- `{{ results.task_1.data }}` - Get result from task_1")
        docs.append("- `{{ user.name }}` - Get user name")
        docs.append("- `{agent_name}` - Simple agent name")
        docs.append("- `$timestamp` - System timestamp")

        # Available variables
        docs.append("\n## Available Variables:")
        variables = self.variable_manager.get_available_variables()
        for scope_name, scope_vars in variables.items():
            docs.append(f"\n### {scope_name}:")
            for _var_name, var_info in scope_vars.items():
                docs.append(f"- `{var_info['path']}`: {var_info['preview']} ({var_info['type']})")

        return "\n".join(docs)

    def _setup_variable_scopes(self):
        """Setup default variable scopes with enhanced structure"""
        self.variable_manager.register_scope('agent', {
            'name': self.amd.name,
            'model_fast': self.amd.fast_llm_model,
            'model_complex': self.amd.complex_llm_model
        })

        timestamp = datetime.now()
        self.variable_manager.register_scope('system', {
            'timestamp': timestamp.isoformat(),
            'version': '2.0',
            'capabilities': list(self._tool_capabilities.keys())
        })

        # ADDED: Initialize empty results and tasks scopes
        self.variable_manager.register_scope('results', {})
        self.variable_manager.register_scope('tasks', {})

        # Update shared state
        self.shared["variable_manager"] = self.variable_manager

    def set_variable(self, path: str, value: Any) -> None:
        """Set a variable by dotted path via the unified VariableManager."""
        self.variable_manager.set(path, value)

    def get_variable(self, path: str, default: Any = None) -> Any:
        """Get a variable by dotted path; returns *default* when unset."""
        return self.variable_manager.get(path, default)

    def format_text(self, text: str, **context) -> str:
        """Render *text*, substituting variable placeholders with current values."""
        return self.variable_manager.format_text(text, context)

    async def initialize_session_context(self, session_id: str = "default", max_history: int = 200) -> bool:
        """Initialize session state, delegating to the UnifiedContextManager.

        Args:
            session_id: Session to initialize.
            max_history: Maximum retained conversation turns.

        Returns:
            True on success, False when initialization raised.
        """
        try:
            # All session bookkeeping is owned by the UnifiedContextManager
            await self.context_manager.initialize_session(session_id, max_history)

            # Make sure the context manager shares our variable manager
            if not self.context_manager.variable_manager:
                self.context_manager.variable_manager = self.variable_manager

            # Minimal shared-state bookkeeping; primary data lives in the manager
            self.shared["active_session_id"] = session_id
            self.shared["session_initialized"] = True

            # Backward compatibility: expose session managers in shared state
            self.shared["session_managers"] = self.context_manager.session_managers

            rprint(f"Session context initialized for {session_id} via UnifiedContextManager")
            return True
        except Exception as e:
            eprint(f"Session context initialization failed: {e}")
            import traceback
            print(traceback.format_exc())
            return False

    async def initialize_context_awareness(self):
        """Prepare session, built-in meta tools and tool-capability analysis.

        Ensures the session is initialized, registers the reasoner's internal
        helpers (variable access, internal reasoning, task stack, outline step
        completion) as regular agent tools, prunes capability cache entries for
        tools that are no longer registered, and analyzes any available tool
        that has no cached capability entry yet.
        """

        # Initialize session if not already done
        session_id = self.shared.get("session_id", self.active_session)
        if not self.shared.get("session_initialized"):
            await self.initialize_session_context(session_id)

        # Ensure tool capabilities are loaded; tqdm provides a progress bar
        # for the per-tool analysis loop below

        from tqdm import tqdm

        if hasattr(self.task_flow, 'llm_reasoner'):
            # Expose internal reasoner helpers as first-class tools (each only once)
            if "read_from_variables" not in self.shared["available_tools"] and hasattr(self.task_flow.llm_reasoner, '_execute_read_from_variables'):
                await self.add_tool(lambda scope, key, purpose: self.task_flow.llm_reasoner._execute_read_from_variables({"scope": scope, "key": key, "purpose": purpose}), "read_from_variables", "Read from variables")
            if "write_to_variables" not in self.shared["available_tools"] and hasattr(self.task_flow.llm_reasoner, '_execute_write_to_variables'):
                await self.add_tool(lambda scope, key, value, description: self.task_flow.llm_reasoner._execute_write_to_variables({"scope": scope, "key": key, "value": value, "description": description}), "write_to_variables", "Write to variables")

            if "internal_reasoning" not in self.shared["available_tools"] and hasattr(self.task_flow.llm_reasoner, '_execute_internal_reasoning'):
                async def internal_reasoning_tool(thought:str, thought_number:int, total_thoughts:int, next_thought_needed:bool, current_focus:str, key_insights:list[str], potential_issues:list[str], confidence_level:float):
                    # Bundle the structured reasoning step into the args dict
                    # expected by the reasoner implementation
                    args = {
                        "thought": thought,
                        "thought_number": thought_number,
                        "total_thoughts": total_thoughts,
                        "next_thought_needed": next_thought_needed,
                        "current_focus": current_focus,
                        "key_insights": key_insights,
                        "potential_issues": potential_issues,
                        "confidence_level": confidence_level
                    }
                    return await self.task_flow.llm_reasoner._execute_internal_reasoning(args, self.shared)
                await self.add_tool(internal_reasoning_tool, "internal_reasoning", "Internal reasoning")

            if "manage_internal_task_stack" not in self.shared["available_tools"] and hasattr(self.task_flow.llm_reasoner, '_execute_manage_task_stack'):
                async def manage_internal_task_stack_tool(action:str, task_description:str, outline_step_ref:str):
                    args = {
                        "action": action,
                        "task_description": task_description,
                        "outline_step_ref": outline_step_ref
                    }
                    return await self.task_flow.llm_reasoner._execute_manage_task_stack(args, self.shared)
                await self.add_tool(manage_internal_task_stack_tool, "manage_internal_task_stack", "Manage internal task stack")

            if "outline_step_completion" not in self.shared["available_tools"] and hasattr(self.task_flow.llm_reasoner, '_execute_outline_step_completion'):
                async def outline_step_completion_tool(step_completed:bool, completion_evidence:str, next_step_focus:str):
                    args = {
                        "step_completed": step_completed,
                        "completion_evidence": completion_evidence,
                        "next_step_focus": next_step_focus
                    }
                    return await self.task_flow.llm_reasoner._execute_outline_step_completion(args, self.shared)
                await self.add_tool(outline_step_completion_tool, "outline_step_completion", "Outline step completion")


        # Drop cached capability entries for tools that are no longer registered
        registered_tools = set(self._tool_registry.keys())
        cached_capabilities = list(self._tool_capabilities.keys())  # copy: the dict is mutated below
        for tool_name in cached_capabilities:
            if tool_name in self._tool_capabilities and tool_name not in registered_tools:
                del self._tool_capabilities[tool_name]
                print(f"Removed outdated capability for unavailable tool: {tool_name}")

        # Analyze tools without a cached capability entry; refresh args schemas
        for tool_name in tqdm(self.shared["available_tools"], desc=f"Agent {self.amd.name} Analyzing Tools", unit="tool", colour="green", total=len(self.shared["available_tools"])):
            if tool_name not in self._tool_capabilities:
                tool_info = self._tool_registry.get(tool_name, {})
                description = tool_info.get("description", "No description")
                with Spinner(f"Analyzing tool {tool_name}"):
                    await self._analyze_tool_capabilities(tool_name, description, tool_info.get("args_schema", "()"))

            if tool_name in self._tool_capabilities:
                function = self._tool_registry[tool_name]["function"]
                self._tool_capabilities[tool_name]["args_schema"] = get_args_schema(function)

        # Set enhanced system context
        self.shared["system_context"] = {
            "capabilities_summary": self._build_capabilities_summary(),
            "tool_count": len(self.shared["available_tools"]),
            "analysis_loaded": len(self._tool_capabilities),
            "intelligence_level": "high" if self._tool_capabilities else "basic",
            "context_management": "advanced_session_aware",
            "session_managers": len(self.shared.get("session_managers", {})),
        }


        rprint("Advanced context awareness initialized with session management")

    async def get_context(self, session_id: str = None, format_for_llm: bool = True) -> str | dict[str, Any]:
        """
        ÜBERARBEITET: Get context über UnifiedContextManager statt verteilte Quellen
        """
        try:
            session_id = session_id or self.shared.get("session_id", self.active_session)
            query = self.shared.get("current_query", "")

            #Hole unified context über Context Manager
            unified_context = await self.context_manager.build_unified_context(session_id, query, "full")


            if format_for_llm:
                return self.context_manager.get_formatted_context_for_llm(unified_context)
            else:
                return unified_context

        except Exception as e:
            import traceback
            print(traceback.format_exc())
            eprint(f"Failed to generate context via UnifiedContextManager: {e}")

            # FALLBACK: Fallback zu alter Methode falls UnifiedContextManager fehlschlägt
            if format_for_llm:
                return f"Error generating context: {str(e)}"
            else:
                return {
                    "error": str(e),
                    "generated_at": datetime.now().isoformat(),
                    "fallback_mode": True
                }

    def get_context_statistics(self) -> dict[str, Any]:
        """Collect statistics about the context management system.

        Aggregates per-session history sizes, compression settings from the
        task flow's context manager (when one is attached), and LLM call
        usage figures recorded in the shared state.
        """
        statistics: dict[str, Any] = {
            "context_system": "advanced_session_aware",
            "compression_threshold": 0.76,
            "max_tokens": getattr(self, 'max_input_tokens', 8000),
            "session_managers": {},
            "context_usage": {},
            "compression_stats": {},
        }

        # Per-session history metrics.
        for mgr_name, mgr in self.shared.get("session_managers", {}).items():
            statistics["session_managers"][mgr_name] = {
                "history_length": len(mgr.history),
                "max_length": mgr.max_length,
                "space_name": mgr.space_name,
            }

        # Compression configuration, if the task flow exposes a context manager.
        if hasattr(self.task_flow, 'context_manager'):
            cm = self.task_flow.context_manager
            statistics["compression_stats"] = {
                "compression_threshold": cm.compression_threshold,
                "max_tokens": cm.max_tokens,
                "active_sessions": len(cm.session_managers),
            }

        # LLM usage figures recorded by the enhanced node.
        call_stats = self.shared.get("llm_call_stats", {})
        if call_stats:
            total_calls = max(call_stats.get("total_calls", 1), 1)
            statistics["context_usage"] = {
                "total_llm_calls": call_stats.get("total_calls", 0),
                "context_compression_rate": call_stats.get("context_compression_rate", 0.0),
                "average_context_tokens": call_stats.get("context_tokens_used", 0) / total_calls,
            }

        return statistics

    def set_persona(self, name: str, style: str = "professional", tone: str = "friendly",
                    personality_traits: list[str] | None = None, apply_method: str = "system_prompt",
                    integration_level: str = "light", custom_instructions: str = ""):
        """Configure the agent persona with extended configuration options.

        Args:
            name: Display name of the persona.
            style: Overall communication style (e.g. "professional").
            tone: Conversational tone (e.g. "friendly").
            personality_traits: Trait keywords; defaults to ["helpful", "concise"].
            apply_method: How the persona is injected (e.g. "system_prompt").
            integration_level: Intensity of the persona integration (e.g. "light").
            custom_instructions: Free-form extra instructions for the persona.
        """
        # None sentinel instead of a mutable default argument.
        if personality_traits is None:
            personality_traits = ["helpful", "concise"]

        self.amd.persona = PersonaConfig(
            name=name,
            style=style,
            tone=tone,
            personality_traits=personality_traits,
            custom_instructions=custom_instructions,
            apply_method=apply_method,
            integration_level=integration_level
        )

        rprint(f"Persona set: {name} ({style}, {tone}) - Method: {apply_method}, Level: {integration_level}")

    def configure_persona_integration(self, apply_method: str = "system_prompt", integration_level: str = "light"):
        """Update how an already configured persona is applied.

        Emits a warning and does nothing when no persona has been set yet.
        """
        persona = self.amd.persona
        if not persona:
            wprint("No persona configured to update")
            return
        persona.apply_method = apply_method
        persona.integration_level = integration_level
        rprint(f"Persona integration updated: {apply_method}, {integration_level}")

    def get_available_variables(self) -> dict[str, str]:
        """Return the variable names usable in dynamic formatting, delegated to the variable manager."""
        manager = self.variable_manager
        return manager.get_available_variables()

    async def _orchestrate_execution(self) -> str:
        """Run the strategic reasoning/execution cycle and return the final response.

        The LLMReasonerNode drives the complete cycle:
          1. Strategic analysis of the query
          2. Decision making about the approach
          3. Orchestration of sub-systems (LLMToolNode, TaskPlanner/Executor)
          4. Response synthesis and formatting

        Returns:
            The final response string (or a halt message when execution was halted).
        """
        self.shared["agent_instance"] = self
        self.shared["session_id"] = self.active_session

        rprint("Starting strategic reasoning and execution cycle")

        # Execute the unified task flow. Its return value is intentionally
        # discarded: all outcomes are communicated through the shared state.
        await self.task_flow.run_async(self.shared)

        # Abort early when the plan was halted by the executor.
        if self.shared.get("plan_halted"):
            error_response = f"Task execution was halted: {self.shared.get('halt_reason', 'Unknown reason')}"
            self.shared["current_response"] = error_response
            return error_response

        # Apply persona/formatting via the response flow, then read the final
        # response once (the earlier duplicate read was dead code).
        await self.response_flow.run_async(self.shared)
        final_response = self.shared.get("current_response", "Task completed successfully.")

        # Reasoning artifacts are currently informational only; they could be
        # attached to response metadata for debugging/transparency later.

        # Log enhanced statistics.
        self._log_execution_stats()

        return final_response

    def _log_execution_stats(self):
        """Log a summary of task completion, plan adaptations and reasoning-loop metrics."""
        tasks = self.shared.get("tasks", {})
        adaptations = self.shared.get("plan_adaptations", 0)
        reasoning_artifacts = self.shared.get("reasoning_artifacts", {})

        done = sum(1 for t in tasks.values() if t.status == "completed")
        failed = sum(1 for t in tasks.values() if t.status == "failed")
        loops = reasoning_artifacts.get("reasoning_loops", 0)

        message = f"Execution complete - Tasks: {done} completed, {failed} failed"
        if adaptations > 0:
            message += f", {adaptations} adaptations"
        if loops > 0:
            message += f", {loops} reasoning loops"
            if done > 0:
                # Tasks completed per reasoning loop as a rough efficiency metric.
                efficiency = done / max(loops, 1)
                message += f" (efficiency: {efficiency:.1f} tasks/loop)"

        rprint(message)

        # Surface high-level strategic progress when reasoning was extensive.
        if loops > 3:
            stack = reasoning_artifacts.get("internal_task_stack", [])
            finished = len([t for t in stack if t.get("status") == "completed"])
            if finished > 0:
                rprint(f"Strategic reasoning: {finished} high-level tasks completed")

    def _build_capabilities_summary(self) -> str:
        """Build summary of agent capabilities"""

        if not self._tool_capabilities:
            return "Basic LLM capabilities only"

        summaries = []
        for tool_name, cap in self._tool_capabilities.items():
            primary = cap.get('primary_function', 'Unknown function')
            summaries.append(f"{tool_name}{cap.get('args_schema', '()')}: {primary}")

        return f"Enhanced capabilities: {'; '.join(summaries)}"

    # Neue Hilfsmethoden für erweiterte Funktionalität

    async def get_task_execution_summary(self) -> dict[str, Any]:
        """Return a detailed summary of task execution.

        Includes completed/failed task details (with durations where both
        timestamps are present), per-type counts, the tools used, and the
        number of plan adaptations.
        """
        tasks = self.shared.get("tasks", {})

        summary = {
            "total_tasks": len(tasks),
            "completed_tasks": [],
            "failed_tasks": [],
            "task_types_used": {},
            "tools_used": [],
            "adaptations": self.shared.get("plan_adaptations", 0),
            "execution_timeline": []
        }

        for task_id, task in tasks.items():
            task_info = {
                "id": task_id,
                "type": task.type,
                "description": task.description,
                "status": task.status,
                "duration": None
            }

            # Duration is only computable when both timestamps exist.
            if task.started_at and task.completed_at:
                task_info["duration"] = (task.completed_at - task.started_at).total_seconds()

            if task.status == "completed":
                summary["completed_tasks"].append(task_info)
                if isinstance(task, ToolTask):
                    summary["tools_used"].append(task.tool_name)
            elif task.status == "failed":
                task_info["error"] = task.error
                summary["failed_tasks"].append(task_info)

            # Count tasks per type.
            task_type = task.type
            summary["task_types_used"][task_type] = summary["task_types_used"].get(task_type, 0) + 1

        return summary

    async def explain_reasoning_process(self) -> str:
        """Erkläre den Reasoning-Prozess des Agenten"""
        if not LITELLM_AVAILABLE:
            return "Reasoning explanation requires LLM capabilities."

        summary = await self.get_task_execution_summary()

        prompt = f"""
Erkläre den Reasoning-Prozess dieses AI-Agenten in verständlicher Form:

## Ausführungszusammenfassung
- Total Tasks: {summary['total_tasks']}
- Erfolgreich: {len(summary['completed_tasks'])}
- Fehlgeschlagen: {len(summary['failed_tasks'])}
- Plan-Adaptationen: {summary['adaptations']}
- Verwendete Tools: {', '.join(set(summary['tools_used']))}
- Task-Typen: {summary['task_types_used']}

## Task-Details
Erfolgreiche Tasks:
{self._format_tasks_for_explanation(summary['completed_tasks'])}

## Anweisungen
Erkläre in 2-3 Absätzen:
1. Welche Strategie der Agent gewählt hat
2. Wie er die Aufgabe in Tasks unterteilt hat
3. Wie er auf unerwartete Ergebnisse reagiert hat (falls Adaptationen)
4. Was die wichtigsten Erkenntnisse waren

Schreibe für einen technischen Nutzer, aber verständlich."""

        try:
            response = await self.a_run_llm_completion(
                model=self.amd.complex_llm_model,
                messages=[{"role": "user", "content": prompt}],
                temperature=0.5,
                max_tokens=800,task_id="reasoning_explanation"
            )

            return response

        except Exception as e:
            import traceback
            print(traceback.format_exc())
            return f"Could not generate reasoning explanation: {e}"

    def _format_tasks_for_explanation(self, tasks: list[dict]) -> str:
        formatted = []
        for task in tasks[:5]:  # Top 5 tasks
            duration_info = f" ({task['duration']:.1f}s)" if task['duration'] else ""
            formatted.append(f"- {task['type']}: {task['description']}{duration_info}")
        return "\n".join(formatted)

    # ===== PAUSE/RESUME FUNCTIONALITY =====

    async def pause(self) -> bool:
        """Pause agent execution and persist a checkpoint.

        Returns:
            False when the agent is not running, True once paused.
        """
        if not self.is_running:
            return False

        self.is_paused = True
        self.shared["system_status"] = "paused"

        # Persist state so execution can be resumed later.
        snapshot = await self._create_checkpoint()
        await self._save_checkpoint(snapshot)

        rprint("Agent execution paused")
        return True

    async def resume(self) -> bool:
        """Resume a previously paused agent.

        Returns:
            False when the agent is not paused, True once resumed.
        """
        if not self.is_paused:
            return False

        self.is_paused = False
        self.shared["system_status"] = "running"
        rprint("Agent execution resumed")
        return True

    # ===== CHECKPOINT MANAGEMENT =====

    async def _create_checkpoint(self) -> AgentCheckpoint:
        """Create a simplified checkpoint focused on essential data.

        Captures agent/AMD state, recent per-session history (last 20
        messages per session), serializable variable scopes, task state,
        world model, the last 50 conversation messages and tool capabilities.

        Raises:
            Exception: re-raised when checkpoint assembly fails.
        """
        try:
            # Persist budget manager data before checkpointing.
            if hasattr(self.amd, 'budget_manager') and self.amd.budget_manager:
                self.amd.budget_manager.save_data()

            # Prepare AMD data (budget_manager stripped for serializability).
            amd_data = self.amd.model_dump()
            amd_data['budget_manager'] = None

            # Collect essential session data (simplified).
            session_data = {}
            if self.context_manager and self.context_manager.session_managers:
                for session_id, session in self.context_manager.session_managers.items():
                    try:
                        if hasattr(session, 'history') and session.history:
                            # Only the last 20 messages per session for the checkpoint.
                            recent_history = session.history[-20:]
                            session_data[session_id] = {
                                "history": recent_history,
                                "session_type": "chatsession",
                                "message_count": len(session.history)
                            }
                        elif isinstance(session, dict) and session.get('history'):
                            session_data[session_id] = {
                                "history": session['history'][-20:],
                                "session_type": "fallback",
                                "message_count": len(session['history'])
                            }
                    except Exception as e:
                        rprint(f"Skipping session {session_id} in checkpoint: {e}")

            # Collect serializable variable scopes.
            variable_scopes = {}
            if self.variable_manager:
                NON_SERIALIZABLE_KEYS = {
                    "tool_registry", "variable_manager", "context_manager", "agent_instance",
                    "llm_tool_node_instance", "task_planner_instance", "task_executor_instance",
                    "progress_tracker", "session_managers", "stream_callback"
                }

                for scope_name, scope_data in self.variable_manager.scopes.items():
                    if isinstance(scope_data, dict):
                        # Filter out known non-serializable runtime objects.
                        clean_scope = {
                            k: v for k, v in scope_data.items()
                            if k not in NON_SERIALIZABLE_KEYS
                        }
                        variable_scopes[scope_name] = clean_scope
                    else:
                        try:
                            # Probe picklability before including the scope.
                            pickle.dumps(scope_data)
                            variable_scopes[scope_name] = scope_data
                        except Exception:
                            # Skip non-serializable scopes. Was a bare `except:`,
                            # which would also swallow KeyboardInterrupt/SystemExit.
                            pass

            # Build the consolidated checkpoint.
            checkpoint = AgentCheckpoint(
                timestamp=datetime.now(),
                agent_state={
                    "is_running": self.is_running,
                    "is_paused": self.is_paused,
                    "amd_data": amd_data,
                    "active_session": self.active_session,
                    "system_status": self.shared.get("system_status", "idle")
                },
                task_state={
                    task_id: asdict(task) for task_id, task in self.shared.get("tasks", {}).items()
                },
                world_model=self.shared.get("world_model", {}),
                active_flows=["task_flow", "response_flow"],
                metadata={
                    "session_id": self.shared.get("session_id", "default"),
                    "last_query": self.shared.get("current_query", ""),
                    "checkpoint_version": "3.0_simplified",
                    "agent_name": self.amd.name
                },
                # Consolidated extra data
                session_data=session_data,
                variable_scopes=variable_scopes,
                results_store=self.shared.get("results", {}),
                conversation_history=self.shared.get("conversation_history", [])[-50:],  # last 50 messages
                tool_capabilities=self._tool_capabilities.copy()
            )

            rprint(f"Vereinfachter Checkpoint erstellt mit {len(session_data)} Sessions")
            return checkpoint

        except Exception as e:
            eprint(f"Checkpoint-Erstellung fehlgeschlagen: {e}")
            import traceback
            print(traceback.format_exc())
            raise

    async def _save_checkpoint(self, checkpoint: AgentCheckpoint, filepath: str | None = None):
        """Persist a checkpoint as a single pickle file under the agent's data dir.

        Args:
            checkpoint: The checkpoint object to store.
            filepath: Optional file name; a timestamped name is generated when omitted.

        Returns:
            True on success, False when saving failed.
        """
        try:
            from toolboxv2 import get_app
            folder = str(get_app().data_dir) + '/Agents/checkpoint/' + self.amd.name
            if not os.path.exists(folder):
                os.makedirs(folder, exist_ok=True)

            if not filepath:
                timestamp = checkpoint.timestamp.strftime("%Y%m%d_%H%M%S")
                filepath = f"agent_checkpoint_{timestamp}.pkl"
            filepath = os.path.join(folder, filepath)

            # Synchronize sessions before persisting so the checkpoint matches disk state.
            if self.context_manager and self.context_manager.session_managers:
                for session_id, session in self.context_manager.session_managers.items():
                    try:
                        if hasattr(session, 'save'):
                            await session.save()
                        elif hasattr(session, '_save_to_memory'):
                            session._save_to_memory()
                    except Exception as e:
                        rprint(f"Session sync error für {session_id}: {e}")

            # Write the checkpoint directly via pickle.
            with open(filepath, 'wb') as f:
                pickle.dump(checkpoint, f)

            self.last_checkpoint = checkpoint.timestamp

            # Build a short human-readable summary for the log line.
            summary_parts = []
            if hasattr(checkpoint, 'session_data') and checkpoint.session_data:
                summary_parts.append(f"{len(checkpoint.session_data)} sessions")
            if checkpoint.task_state:
                completed_tasks = len([t for t in checkpoint.task_state.values() if t.get("status") == "completed"])
                summary_parts.append(f"{completed_tasks} completed tasks")
            if hasattr(checkpoint, 'variable_scopes') and checkpoint.variable_scopes:
                summary_parts.append(f"{len(checkpoint.variable_scopes)} variable scopes")

            summary = "; ".join(summary_parts) if summary_parts else "Basic checkpoint"
            rprint(f"Checkpoint gespeichert: {filepath} ({summary})")
            return True

        except Exception as e:
            eprint(f"Checkpoint-Speicherung fehlgeschlagen: {e}")
            # NOTE(review): printing the raw checkpoint looks like a debug leftover — confirm.
            print(checkpoint)
            return False

    async def load_latest_checkpoint(self, auto_restore_history: bool = True, max_age_hours: int = 24) -> dict[str, Any]:
        """Load the newest checkpoint and restore agent state from it.

        Scans the agent's checkpoint directory for pickle files younger than
        ``max_age_hours``, loads the most recent one, restores state and
        re-initializes context awareness.

        Args:
            auto_restore_history: Also restore sessions and conversation history.
            max_age_hours: Ignore checkpoints older than this many hours.

        Returns:
            A dict with ``success`` plus either checkpoint/restore details or an ``error`` message.
        """
        try:
            from toolboxv2 import get_app
            folder = str(get_app().data_dir) + '/Agents/checkpoint/' + self.amd.name

            if not os.path.exists(folder):
                return {"success": False, "error": "Kein Checkpoint-Verzeichnis gefunden"}

            # Find all checkpoints within the allowed age window.
            checkpoint_files = []
            for file in os.listdir(folder):
                if file.endswith('.pkl') and file.startswith('agent_checkpoint_'):
                    filepath = os.path.join(folder, file)
                    try:
                        timestamp_str = file.replace('agent_checkpoint_', '').replace('.pkl', '')
                        if timestamp_str == 'final_checkpoint':
                            # Special name without an embedded timestamp: fall back to file mtime.
                            file_time = datetime.fromtimestamp(os.path.getmtime(filepath))
                        else:
                            file_time = datetime.strptime(timestamp_str, "%Y%m%d_%H%M%S")

                        age_hours = (datetime.now() - file_time).total_seconds() / 3600
                        if age_hours <= max_age_hours:
                            checkpoint_files.append((filepath, file_time, age_hours))
                    except Exception:
                        # Skip files whose name cannot be parsed as a checkpoint.
                        continue

            if not checkpoint_files:
                return {"success": False, "error": f"Keine gültigen Checkpoints in {max_age_hours} Stunden gefunden"}

            # Pick the newest checkpoint (sorted by timestamp, descending).
            checkpoint_files.sort(key=lambda x: x[1], reverse=True)
            latest_checkpoint_path, latest_timestamp, age_hours = checkpoint_files[0]

            rprint(f"Lade Checkpoint: {latest_checkpoint_path} (Alter: {age_hours:.1f}h)")

            # NOTE(review): pickle.load is only safe on trusted, locally produced files.
            with open(latest_checkpoint_path, 'rb') as f:
                checkpoint: AgentCheckpoint = pickle.load(f)

            # Restore agent state from the checkpoint.
            restore_stats = await self._restore_from_checkpoint_simplified(checkpoint, auto_restore_history)

            # Re-initialize context awareness after restoring.
            await self.initialize_context_awareness()

            return {
                "success": True,
                "checkpoint_file": latest_checkpoint_path,
                "checkpoint_age_hours": age_hours,
                "checkpoint_timestamp": latest_timestamp.isoformat(),
                "available_checkpoints": len(checkpoint_files),
                "restore_stats": restore_stats
            }

        except Exception as e:
            eprint(f"Checkpoint-Laden fehlgeschlagen: {e}")
            import traceback
            print(traceback.format_exc())
            return {"success": False, "error": str(e)}

    async def _restore_from_checkpoint_simplified(self, checkpoint: AgentCheckpoint, auto_restore_history: bool) -> dict[str, Any]:
        """Restore agent state from a checkpoint (simplified scheme).

        Restores, in order: agent/AMD state, world model, variable scopes,
        tasks, the results store, optionally sessions/conversation history,
        and tool capabilities. Individual failures are collected into the
        returned ``errors`` list instead of aborting the whole restore.

        Args:
            checkpoint: Checkpoint to restore from.
            auto_restore_history: Also restore sessions and conversation history.

        Returns:
            Statistics dict describing what was restored and any errors.
        """
        restore_stats = {
            "agent_state_restored": False,
            "world_model_restored": False,
            "tasks_restored": 0,
            "sessions_restored": 0,
            "variables_restored": 0,
            "conversation_restored": 0,
            "errors": []
        }

        try:
            # 1. Restore agent status flags and selected AMD fields.
            if checkpoint.agent_state:
                self.is_running = checkpoint.agent_state.get("is_running", False)
                self.is_paused = checkpoint.agent_state.get("is_paused", False)
                self.active_session = checkpoint.agent_state.get("active_session")

                # Restore AMD data selectively.
                amd_data = checkpoint.agent_state.get("amd_data", {})
                if amd_data:
                    # Only restore fields known to be safe to overwrite.
                    safe_fields = ["name", "use_fast_response", "max_input_tokens"]
                    for field in safe_fields:
                        if field in amd_data and hasattr(self.amd, field):
                            setattr(self.amd, field, amd_data[field])

                    # Restore the persona if one was saved.
                    if "persona" in amd_data and amd_data["persona"]:
                        try:
                            persona_data = amd_data["persona"]
                            if isinstance(persona_data, dict):
                                self.amd.persona = PersonaConfig(**persona_data)
                        except Exception as e:
                            restore_stats["errors"].append(f"Persona restore failed: {e}")

                restore_stats["agent_state_restored"] = True

            # 2. Restore the world model (copied to avoid sharing the checkpoint's dict).
            if checkpoint.world_model:
                self.shared["world_model"] = checkpoint.world_model.copy()
                self.world_model = checkpoint.world_model.copy()
                restore_stats["world_model_restored"] = True

            # 3. Restore the variable system.
            if hasattr(checkpoint, 'variable_scopes') and checkpoint.variable_scopes:
                # Re-initialize the variable manager from the restored world model.
                self.variable_manager = VariableManager(self.shared["world_model"], self.shared)

                # Set up the base scopes.
                self._setup_variable_scopes()

                # Restore the saved scopes on top of the base scopes.
                for scope_name, scope_data in checkpoint.variable_scopes.items():
                    try:
                        self.variable_manager.register_scope(scope_name, scope_data)
                        restore_stats["variables_restored"] += 1
                    except Exception as e:
                        restore_stats["errors"].append(f"Variable scope {scope_name}: {e}")

                # Re-insert runtime objects that were stripped for serialization.
                self.variable_manager.set("shared", "variable_manager", self.variable_manager)
                self.variable_manager.set("shared", "context_manager", self.context_manager)
                self.variable_manager.set("shared", "agent_instance", self)

                self.shared["variable_manager"] = self.variable_manager

            # 4. Restore tasks, reconstructing the concrete task classes by type tag.
            if checkpoint.task_state:
                restored_tasks = {}
                for task_id, task_data in checkpoint.task_state.items():
                    try:
                        task_type = task_data.get("type", "generic")
                        if task_type == "LLMTask":
                            restored_tasks[task_id] = LLMTask(**task_data)
                        elif task_type == "ToolTask":
                            restored_tasks[task_id] = ToolTask(**task_data)
                        elif task_type == "DecisionTask":
                            restored_tasks[task_id] = DecisionTask(**task_data)
                        else:
                            restored_tasks[task_id] = Task(**task_data)

                        restore_stats["tasks_restored"] += 1
                    except Exception as e:
                        restore_stats["errors"].append(f"Task {task_id}: {e}")

                self.shared["tasks"] = restored_tasks

            # 5. Restore the results store.
            if hasattr(checkpoint, 'results_store') and checkpoint.results_store:
                self.shared["results"] = checkpoint.results_store
                if self.variable_manager:
                    self.variable_manager.set_results_store(checkpoint.results_store)

            # 6. Restore sessions and conversation history (if requested).
            if auto_restore_history:
                await self._restore_sessions_and_conversation_simplified(checkpoint, restore_stats)

            # 7. Restore tool capabilities.
            if hasattr(checkpoint, 'tool_capabilities') and checkpoint.tool_capabilities:
                self._tool_capabilities = checkpoint.tool_capabilities.copy()

            # Mark the system as restored.
            self.shared["system_status"] = "restored"
            restore_stats["restoration_timestamp"] = datetime.now().isoformat()

            rprint(
                f"Checkpoint wiederhergestellt: {restore_stats['tasks_restored']} Tasks, {restore_stats['sessions_restored']} Sessions, {len(restore_stats['errors'])} Fehler")
            return restore_stats

        except Exception as e:
            eprint(f"Checkpoint-Wiederherstellung fehlgeschlagen: {e}")
            import traceback
            print(traceback.format_exc())
            restore_stats["errors"].append(f"Critical restore error: {e}")
            return restore_stats

    async def _restore_sessions_and_conversation_simplified(self, checkpoint: AgentCheckpoint, restore_stats: dict):
        """Restore sessions and conversation history from a checkpoint.

        Mutates ``restore_stats`` in place: increments the restored-session
        counter, sets the conversation count and appends any errors.
        """
        try:
            # Ensure a context manager exists before restoring sessions.
            if not self.context_manager:
                self.context_manager = UnifiedContextManager(self)
                self.context_manager.variable_manager = self.variable_manager

            # Restore the individual sessions.
            if hasattr(checkpoint, 'session_data') and checkpoint.session_data:
                for session_id, session_info in checkpoint.session_data.items():
                    try:
                        # Initialize the session via the context manager.
                        max_length = session_info.get("message_count", 200)
                        restored_session = await self.context_manager.initialize_session(session_id, max_length)

                        # Restore the message history.
                        history = session_info.get("history", [])
                        if history and hasattr(restored_session, 'history'):
                            # Append directly into the session history.
                            restored_session.history.extend(history)

                        restore_stats["sessions_restored"] += 1
                    except Exception as e:
                        restore_stats["errors"].append(f"Session {session_id}: {e}")

            # Restore the flat conversation history.
            if hasattr(checkpoint, 'conversation_history') and checkpoint.conversation_history:
                self.shared["conversation_history"] = checkpoint.conversation_history
                restore_stats["conversation_restored"] = len(checkpoint.conversation_history)

            # Update the shared context references.
            self.shared["context_manager"] = self.context_manager
            if self.context_manager.session_managers:
                self.shared["session_managers"] = self.context_manager.session_managers
                self.shared["session_initialized"] = True

        except Exception as e:
            restore_stats["errors"].append(f"Session/conversation restore failed: {e}")

    async def _maybe_checkpoint(self):
        """Vereinfachtes automatisches Checkpointing"""
        if not self.enable_pause_resume:
            return

        now = datetime.now()
        if (not self.last_checkpoint or
            (now - self.last_checkpoint).seconds >= self.checkpoint_interval):

            try:
                checkpoint = await self._create_checkpoint()
                await self._save_checkpoint(checkpoint)
            except Exception as e:
                eprint(f"Automatic checkpoint failed: {e}")


    async def save_context_to_session(self, session_id: str = None, context_type: str = "full") -> bool:
        """Persist a snapshot of the current unified context into a ChatSession.

        The snapshot is stored as a system interaction whose content embeds
        the JSON-serialized context, tagged with metadata for later retrieval.

        Returns:
            True on success, False when the context manager is missing or saving failed.
        """
        try:
            session_id = session_id or self.shared.get("session_id", "default")

            if not self.context_manager:
                eprint("Context manager not available")
                return False

            # Build the comprehensive context snapshot.
            unified_context = await self.context_manager.build_unified_context(session_id, None, context_type)

            snapshot_content = f"[CONTEXT_SNAPSHOT_{context_type.upper()}] " + json.dumps(unified_context, default=str)
            snapshot_metadata = {
                "is_context_snapshot": True,
                "context_version": "2.0",
                "agent_name": self.amd.name,
                "session_stats": unified_context.get("session_stats", {}),
                "variables_count": len(unified_context.get("variables", {}).get("recent_results", [])),
                "execution_state": unified_context.get("execution_state", {}).get("system_status", "unknown")
            }

            # Store the snapshot in the session history.
            await self.context_manager.add_interaction(
                session_id,
                "system",
                snapshot_content,
                metadata=snapshot_metadata
            )

            rprint(f"Context snapshot saved to session {session_id} (type: {context_type})")
            return True

        except Exception as e:
            eprint(f"Failed to save context to session: {e}")
            return False

    async def load_context_from_session(self, session_id: str, context_type: str = "full") -> dict[str, Any]:
        """Load the most recent context snapshot of the given type from a ChatSession.

        Returns:
            The stored context dict, or a dict with an ``error`` key when no
            manager, session or matching snapshot is available.
        """
        try:
            if not self.context_manager:
                return {"error": "Context manager not available"}

            session = self.context_manager.session_managers.get(session_id)
            if not session:
                return {"error": f"Session {session_id} not found"}

            marker = f"[CONTEXT_SNAPSHOT_{context_type.upper()}]"
            snapshots = []

            # Scan the history newest-first for matching snapshot messages.
            if hasattr(session, 'history'):
                for message in reversed(session.history):
                    meta = message.get("metadata", {})
                    if not (message.get("role") == "system"
                            and meta.get("is_context_snapshot")
                            and meta.get("context_type") == context_type):
                        continue
                    try:
                        content = message.get("content", "")
                        if content.startswith(marker):
                            payload = content.replace(marker + " ", "")
                            snapshots.append({
                                "context": json.loads(payload),
                                "timestamp": message.get("timestamp"),
                                "metadata": meta
                            })
                    except Exception as e:
                        wprint(f"Failed to parse context snapshot: {e}")

            if snapshots:
                # The first collected snapshot is the most recent one.
                latest = snapshots[0]
                rprint(f"Loaded context snapshot from session {session_id} (timestamp: {latest['timestamp']})")
                return latest["context"]
            return {"error": f"No context snapshots of type '{context_type}' found in session {session_id}"}

        except Exception as e:
            eprint(f"Failed to load context from session: {e}")
            return {"error": str(e)}

    async def cleanup_session_context(self, session_id: str = None, keep_count: int = 100,
                                      remove_old_snapshots: bool = True) -> dict[str, Any]:
        """Trim a session's stored history, pruning old snapshots and context entries.

        Args:
            session_id: Session to clean; defaults to the shared session id.
            keep_count: Number of most recent regular messages to retain.
            remove_old_snapshots: When True, drop all context snapshots; otherwise keep the 5 newest.

        Returns:
            Statistics dict describing the cleanup, or {"error": ...}.
        """
        try:
            session_id = session_id or self.shared.get("session_id", "default")

            if not self.context_manager:
                return {"error": "Context manager not available"}

            session = self.context_manager.session_managers.get(session_id)
            if not session or not hasattr(session, 'history'):
                return {"error": f"Session {session_id} not found or has no history"}

            stats = {
                "original_message_count": len(session.history),
                "context_snapshots_removed": 0,
                "context_entries_removed": 0,
                "regular_messages_kept": 0,
                "cleanup_performed": False
            }

            if len(session.history) <= keep_count:
                return {**stats, "message": "No cleanup needed"}

            # Bucket messages by kind
            snapshots, entries, regular = [], [], []
            for msg in session.history:
                meta = msg.get("metadata", {})
                if meta.get("is_context_snapshot"):
                    snapshots.append(msg)
                elif meta.get("is_context_entry"):
                    entries.append(msg)
                else:
                    regular.append(msg)

            survivors = regular[-keep_count:]
            stats["regular_messages_kept"] = len(survivors)

            if remove_old_snapshots:
                stats["context_snapshots_removed"] = len(snapshots)
            else:
                survivors.extend(snapshots[-5:])  # keep only the 5 newest snapshots

            # Context entries flagged persistent (default) always survive
            persistent = [e for e in entries if e.get("persistent", True)]
            survivors.extend(persistent)
            stats["context_entries_removed"] = len(entries) - len(persistent)

            # Restore chronological order before writing back
            survivors.sort(key=lambda m: m.get("timestamp", ""))
            session.history = survivors

            stats["final_message_count"] = len(session.history)
            stats["cleanup_performed"] = True
            stats["messages_removed"] = stats["original_message_count"] - len(session.history)

            rprint(f"Session cleanup completed: {stats['messages_removed']} messages removed")
            return stats

        except Exception as e:
            eprint(f"Failed to cleanup session context: {e}")
            return {"error": str(e)}

    def get_session_storage_stats(self) -> dict[str, Any]:
        """Summarize message counts and size estimates across all managed sessions.

        Returns:
            Nested stats dict (per-session details plus totals), or {"error": ...}.
        """
        try:
            summary = {
                "total_messages": 0,
                "context_snapshots": 0,
                "context_entries": 0,
                "regular_messages": 0
            }
            stats = {
                "context_manager_active": bool(self.context_manager),
                "total_sessions": 0,
                "session_details": {},
                "storage_summary": summary
            }

            if not self.context_manager:
                return stats

            managers = self.context_manager.session_managers
            stats["total_sessions"] = len(managers)

            for sid, session in managers.items():
                detail = {
                    "session_type": "chatsession" if hasattr(session, 'history') else "fallback",
                    "message_count": 0,
                    "context_snapshots": 0,
                    "context_entries": 0,
                    "regular_messages": 0,
                    "storage_size_estimate": 0
                }

                if hasattr(session, 'history'):
                    detail["message_count"] = len(session.history)
                    for msg in session.history:
                        # Size estimate is the repr length, not true byte size
                        detail["storage_size_estimate"] += len(str(msg))
                        meta = msg.get("metadata", {})
                        if meta.get("is_context_snapshot"):
                            detail["context_snapshots"] += 1
                        elif meta.get("is_context_entry"):
                            detail["context_entries"] += 1
                        else:
                            detail["regular_messages"] += 1
                elif isinstance(session, dict) and 'history' in session:
                    # Fallback sessions are plain dicts; all messages count as regular
                    history = session['history']
                    detail["message_count"] = len(history)
                    detail["regular_messages"] = len(history)
                    detail["storage_size_estimate"] = sum(len(str(m)) for m in history)

                stats["session_details"][sid] = detail

                summary["total_messages"] += detail["message_count"]
                summary["context_snapshots"] += detail["context_snapshots"]
                summary["context_entries"] += detail["context_entries"]
                summary["regular_messages"] += detail["regular_messages"]

            summary["estimated_total_size_kb"] = sum(
                d["storage_size_estimate"] for d in stats["session_details"].values()
            ) / 1024

            return stats

        except Exception as e:
            eprint(f"Failed to get session storage stats: {e}")
            return {"error": str(e)}

    def list_available_checkpoints(self, max_age_hours: int = 168) -> list[dict[str, Any]]:  # Default 1 week
        """List available agent checkpoints (newest first) with file and content metadata.

        Args:
            max_age_hours: Only checkpoints younger than this many hours are returned.

        Returns:
            List of dicts with filepath, filename, checkpoint_type ("regular"/"final"),
            ISO timestamp, age_hours, file_size_kb and best-effort content metadata.
        """
        try:
            from toolboxv2 import get_app
            folder = str(get_app().data_dir) + '/Agents/checkpoint/' + self.amd.name

            if not os.path.exists(folder):
                return []

            checkpoints = []
            for file in os.listdir(folder):
                if not (file.endswith('.pkl') and file.startswith('agent_checkpoint_')):
                    continue
                filepath = os.path.join(folder, file)
                try:
                    # File-level info
                    file_stat = os.stat(filepath)
                    file_size = file_stat.st_size
                    modified_time = datetime.fromtimestamp(file_stat.st_mtime)

                    # Timestamp is encoded in the filename, except for the final checkpoint
                    timestamp_str = file.replace('agent_checkpoint_', '').replace('.pkl', '')
                    if timestamp_str == 'final_checkpoint':
                        checkpoint_time = modified_time
                        checkpoint_type = "final"
                    else:
                        checkpoint_time = datetime.strptime(timestamp_str, "%Y%m%d_%H%M%S")
                        checkpoint_type = "regular"

                    # Skip checkpoints outside the requested age window
                    age_hours = (datetime.now() - checkpoint_time).total_seconds() / 3600
                    if age_hours > max_age_hours:
                        continue

                    # Best-effort content inspection. NOTE: pickle.load deserializes the
                    # entire file and can execute arbitrary code on untrusted data —
                    # checkpoints are assumed to originate from this agent's own data dir.
                    metadata = {}
                    try:
                        with open(filepath, 'rb') as f:
                            checkpoint = pickle.load(f)
                        has_meta = hasattr(checkpoint, 'metadata') and checkpoint.metadata
                        metadata = {
                            "tasks_count": len(checkpoint.task_state) if checkpoint.task_state else 0,
                            "world_model_entries": len(checkpoint.world_model) if checkpoint.world_model else 0,
                            "session_id": checkpoint.metadata.get("session_id", "unknown") if has_meta else "unknown",
                            "last_query": checkpoint.metadata.get("last_query", "unknown")[:100] if has_meta else "unknown"
                        }
                    except Exception:
                        # fix: was a bare `except:`; keep the file listed even if unreadable
                        metadata = {"load_error": True}

                    checkpoints.append({
                        "filepath": filepath,
                        "filename": file,
                        "checkpoint_type": checkpoint_type,
                        "timestamp": checkpoint_time.isoformat(),
                        "age_hours": round(age_hours, 1),
                        "file_size_kb": round(file_size / 1024, 1),
                        "metadata": metadata
                    })

                except Exception as e:
                    import traceback
                    print(traceback.format_exc())
                    wprint(f"Could not analyze checkpoint file {file}: {e}")
                    continue

            # Newest first
            checkpoints.sort(key=lambda x: x["timestamp"], reverse=True)

            return checkpoints

        except Exception as e:
            import traceback
            print(traceback.format_exc())
            eprint(f"Failed to list checkpoints: {e}")
            return []

    async def delete_old_checkpoints(self, keep_count: int = 5, max_age_hours: int = 168) -> dict[str, Any]:
        """Delete surplus and aged checkpoints, never touching the final checkpoint.

        Args:
            keep_count: Number of newest checkpoints to retain.
            max_age_hours: Checkpoints older than this are removed even if within keep_count.

        Returns:
            dict with success flag, deleted_count, freed_space_kb, remaining_checkpoints, errors.
        """
        import traceback
        try:
            # Look further back than the caller's window so aged files are also found
            checkpoints = self.list_available_checkpoints(max_age_hours=max_age_hours * 2)

            deleted_count = 0
            deleted_size_kb = 0
            errors = []
            # fix: track actually-deleted paths instead of comparing dicts against a
            # list slice — robust against failed removals and cheap to check
            deleted_paths = set()

            def _remove(cp, reason: str):
                # Delete one checkpoint file and update counters/errors.
                nonlocal deleted_count, deleted_size_kb
                try:
                    os.remove(cp["filepath"])
                    deleted_paths.add(cp["filepath"])
                    deleted_count += 1
                    deleted_size_kb += cp["file_size_kb"]
                    rprint(f"Deleted {reason} checkpoint: {cp['filename']}")
                except Exception as e:
                    print(traceback.format_exc())
                    errors.append(f"Failed to delete {cp['filename']}: {e}")

            # Pass 1: drop everything beyond the newest keep_count (final checkpoint is kept)
            for cp in checkpoints[keep_count:]:
                if cp["checkpoint_type"] != "final":
                    _remove(cp, "old")

            # Pass 2: drop anything older than max_age_hours that survived pass 1
            for cp in checkpoints:
                if (cp["age_hours"] > max_age_hours
                        and cp["checkpoint_type"] != "final"
                        and cp["filepath"] not in deleted_paths):
                    _remove(cp, "aged")

            return {
                "success": True,
                "deleted_count": deleted_count,
                "freed_space_kb": round(deleted_size_kb, 1),
                "remaining_checkpoints": len(checkpoints) - deleted_count,
                "errors": errors
            }

        except Exception as e:
            print(traceback.format_exc())
            eprint(f"Failed to delete old checkpoints: {e}")
            return {
                "success": False,
                "error": str(e),
                "deleted_count": 0
            }

    # ===== TOOL AND NODE MANAGEMENT =====
    def _get_tool_analysis_path(self) -> str:
        """Return the JSON cache file path for this agent's tool capability analysis."""
        from toolboxv2 import get_app
        # One cache directory per agent name, created on demand
        cache_dir = f"{get_app().data_dir}/Agents/capabilities/{self.amd.name}"
        os.makedirs(cache_dir, exist_ok=True)
        return f"{cache_dir}/tool_capabilities.json"

    def _get_context_path(self, session_id=None) -> str:
        """Build a timestamped file path for persisting this agent's context.

        Args:
            session_id: Optional session identifier appended to the filename.

        Returns:
            Path under <data_dir>/Agents/context/<agent_name>/ with a unique
            timestamped JSON filename.
        """
        from toolboxv2 import get_app
        folder = str(get_app().data_dir) + '/Agents/context/' + self.amd.name
        os.makedirs(folder, exist_ok=True)
        # Timestamp makes every saved context file unique per call
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        session_suffix = f"_session_{session_id}" if session_id else ""
        filepath = f"agent_context_{self.amd.name}_{timestamp}{session_suffix}.json"
        return folder + f'/{filepath}'

    def add_first_class_tool(self, tool_func: Callable, name: str, description: str):
        """
        Register a first-class meta-tool on the LLMReasonerNode.

        Meta-tools differ from regular tools: they control agent sub-systems and
        live in the reasoner's own registry.

        Args:
            tool_func: The function to register as a meta-tool
            name: Name of the meta-tool
            description: Description of when and how to use it
        """
        if not callable(tool_func):
            raise ValueError("Tool function must be callable")

        _missing = object()
        reasoner = getattr(self.task_flow, 'llm_reasoner', _missing)
        if reasoner is _missing:
            wprint("LLMReasonerNode not available for first-class tool registration")
            return

        # Create the registry lazily on first registration
        if not hasattr(reasoner, 'meta_tools_registry'):
            reasoner.meta_tools_registry = {}

        reasoner.meta_tools_registry[name] = {
            "function": tool_func,
            "description": description,
            "added_at": datetime.now().isoformat()
        }
        rprint(f"First-class meta-tool added: {name}")

    async def add_tool(self, tool_func: Callable, name: str = None, description: str = None, is_new=False):
        """Register a tool, wrapping sync callables so they never block the event loop.

        Args:
            tool_func: Sync or async callable to expose as a tool.
            name: Optional tool name (defaults to the function's __name__).
            description: Optional description (defaults to the docstring).
            is_new: When True, run the LLM-based capability analysis for the tool.
        """
        if not asyncio.iscoroutinefunction(tool_func):
            # Run sync tools in a worker thread
            @wraps(tool_func)
            async def async_wrapper(*args, **kwargs):
                return await asyncio.to_thread(tool_func, *args, **kwargs)

            effective_func = async_wrapper
        else:
            effective_func = tool_func

        tool_name = name or effective_func.__name__
        tool_description = description or effective_func.__doc__ or "No description"
        args_schema = get_args_schema(tool_func)

        # Store in registry
        self._tool_registry[tool_name] = {
            "function": effective_func,
            "description": tool_description,
            "args_schema": args_schema
        }

        # Add to available tools list
        if tool_name not in self.shared["available_tools"]:
            self.shared["available_tools"].append(tool_name)

        # Intelligent tool analysis
        # fix: _analyze_tool_capabilities requires the tool's args schema as its
        # third argument; the old 2-arg call raised TypeError whenever is_new=True.
        if is_new:
            await self._analyze_tool_capabilities(tool_name, tool_description, str(args_schema))

        rprint(f"Tool added with analysis: {tool_name}")

    async def _analyze_tool_capabilities(self, tool_name: str, description: str, tool_args:str):
        """Analyze tool capabilities with LLM for smart usage"""

        # Try to load existing analysis
        existing_analysis = self._load_tool_analysis()

        if tool_name in existing_analysis:
            try:
                # Validate cached data against the Pydantic model
                ToolAnalysis.model_validate(existing_analysis[tool_name])
                self._tool_capabilities[tool_name] = existing_analysis[tool_name]
                rprint(f"Loaded and validated cached analysis for {tool_name}")
            except ValidationError as e:
                wprint(f"Cached data for {tool_name} is invalid and will be regenerated: {e}")
                del self._tool_capabilities[tool_name]

        if not LITELLM_AVAILABLE:
            # Fallback analysis
            self._tool_capabilities[tool_name] = {
                "use_cases": [description],
                "triggers": [tool_name.lower().replace('_', ' ')],
                "complexity": "unknown",
                "confidence": 0.3
            }
            return

        # LLM-based intelligent analysis
        prompt = f"""
Analyze this tool and identify ALL possible use cases, triggers, and connections:

Tool Name: {tool_name}
args: {tool_args}
Description: {description}


Provide a comprehensive analysis covering:

1. OBVIOUS use cases (direct functionality)
2. INDIRECT connections (when this tool might be relevant)
3. TRIGGER PHRASES (what user queries would benefit from this tool)
4. COMPLEX scenarios (non-obvious applications)
5. CONTEXTUAL usage (when combined with other information)

Example for a "get_user_name" tool:
- Obvious: When user asks "what is my name"
- Indirect: Personalization, greetings, user identification
- Triggers: "my name", "who am I", "hello", "introduce yourself", "personalize"
- Complex: User context in multi-step tasks, addressing user directly
- Contextual: Any response that could be personalized

Rule! no additional comments or text in the format !
schema:
 {yaml.dump(ToolAnalysis.model_json_schema())}

Respond in YAML format:
Example:
```yaml
primary_function: "Retrieves the current user's name."
use_cases:
  - "Responding to 'what is my name?'"
  - "Personalizing greeting messages."
trigger_phrases:
  - "my name"
  - "who am I"
  - "introduce yourself"
indirect_connections:
  - "User identification in multi-factor authentication."
  - "Tagging user-generated content."
complexity_scenarios:
  - "In a multi-step task, remembering the user's name to personalize the final output."
user_intent_categories:
  - "Personalization"
  - "User Identification"
confidence_triggers:
  "my name": 0.95
  "who am I": 0.9
tool_complexity: low/medium/high
```
"""
        model = os.getenv("BASEMODEL", self.amd.fast_llm_model)
        for i in range(3):
            try:
                response = await self.a_run_llm_completion(
                    model=model,
                    messages=[{"role": "user", "content": prompt}],
                    with_context=False,
                    temperature=0.3,
                    max_tokens=1000,
                    task_id=f"tool_analysis_{tool_name}"
                )

                content = response.strip()

                # Extract JSON
                if "```yaml" in content:
                    yaml_str = content.split("```yaml")[1].split("```")[0].strip()
                else:
                    yaml_str = content

                analysis = yaml.safe_load(yaml_str)

                # Store analysis
                self._tool_capabilities[tool_name] = analysis

                # Save to cache
                await self._save_tool_analysis()

                validated_analysis = ToolAnalysis.model_validate(analysis)
                rprint(f"Generated intelligent analysis for {tool_name}")
                break

            except Exception as e:
                import traceback
                print(traceback.format_exc())
                model = self.amd.complex_llm_model if i > 1 else self.amd.fast_llm_model
                eprint(f"Tool analysis failed for {tool_name}: {e}")
                # Fallback
                self._tool_capabilities[tool_name] = {
                    "primary_function": description,
                    "use_cases": [description],
                    "trigger_phrases": [tool_name.lower().replace('_', ' ')],
                    "tool_complexity": "medium"
                }

    def _load_tool_analysis(self) -> dict[str, Any]:
        """Load tool analysis from cache"""
        try:
            if os.path.exists(self.tool_analysis_file):
                with open(self.tool_analysis_file) as f:
                    return json.load(f)
        except Exception as e:
            wprint(f"Could not load tool analysis: {e}")
        return {}


    async def save_context_to_file(self, session_id: str = None) -> bool:
        """Serialize the current (non-LLM-formatted) context to a timestamped JSON file.

        Args:
            session_id: Optional session to scope the context and filename.

        Returns:
            True when the file was written, False on any failure.
        """
        try:
            context = await self.get_context(session_id=session_id, format_for_llm=False)
            target = self._get_context_path(session_id)

            # default=str keeps non-JSON-native values serializable
            with open(target, 'w', encoding='utf-8') as fh:
                json.dump(context, fh, indent=2, ensure_ascii=False, default=str)

            rprint(f"Context saved to: {target}")
            return True

        except Exception as e:
            eprint(f"Failed to save context: {e}")
            return False

    async def _save_tool_analysis(self):
        """Save tool analysis to cache"""
        try:
            with open(self.tool_analysis_file, 'w') as f:
                json.dump(self._tool_capabilities, f, indent=2)
        except Exception as e:
            eprint(f"Could not save tool analysis: {e}")

    def add_custom_flow(self, flow: AsyncFlow, name: str):
        """Register a custom flow's run method as a callable tool.

        Args:
            flow: The AsyncFlow whose run_async becomes the tool.
            name: Tool name to register under.
        """
        # fix: add_tool is a coroutine; the old un-awaited call never ran, so the
        # flow was silently never registered ("coroutine was never awaited").
        coro = self.add_tool(flow.run_async, name=name, description=f"Custom flow: {flow.__class__.__name__}")
        try:
            # Inside a running loop we cannot block; schedule the registration.
            loop = asyncio.get_running_loop()
            loop.create_task(coro)
        except RuntimeError:
            # No running loop: drive the coroutine to completion synchronously.
            asyncio.run(coro)
        rprint(f"Custom node added: {name}")

    def get_tool_by_name(self, tool_name: str) -> Callable | None:
        """Get tool function by name"""
        return self._tool_registry.get(tool_name, {}).get("function")

    async def arun_function(self, function_name: str, *args, **kwargs) -> Any:
        """
        Asynchronously finds a function by its string name, executes it with
        the given arguments, and returns the result.

        Args:
            function_name: Name the tool was registered under.
            *args: Positional arguments forwarded to the tool.
            **kwargs: Keyword arguments forwarded to the tool (also reported
                as tool_args in the emitted progress event).

        Returns:
            Whatever the tool returns (awaited if it yields a coroutine).

        Raises:
            ValueError: If no tool with that name is registered.
            Exception: Re-raises whatever the tool itself raises.
        """
        rprint(f"Attempting to run function: {function_name} with args: {args}, kwargs: {kwargs}")
        target_function = self.get_tool_by_name(function_name)

        start_time = time.perf_counter()
        if not target_function:
            raise ValueError(f"Function '{function_name}' not found in the {self.amd.name}'s registered tools.")

        try:
            if asyncio.iscoroutinefunction(target_function):
                result = await target_function(*args, **kwargs)
            else:
                # If the function is not async, run it in a thread pool
                loop = asyncio.get_running_loop()
                result = await loop.run_in_executor(None, lambda: target_function(*args, **kwargs))

            # A sync tool may itself hand back a coroutine; resolve it as well
            if asyncio.iscoroutine(result):
                result = await result

            if self.progress_tracker:
                await self.progress_tracker.emit_event(ProgressEvent(
                    event_type="tool_call",  # unified as tool_call
                    node_name="FlowAgent",
                    status=NodeStatus.COMPLETED,
                    success=True,
                    duration=time.perf_counter() - start_time,
                    tool_name=function_name,
                    tool_args=kwargs,
                    tool_result=result,
                    is_meta_tool=False,  # make explicit this is not a meta-tool
                    metadata={
                        "result_type": type(result).__name__,
                        "result_length": len(str(result))
                    }
                ))
            rprint(f"Function {function_name} completed successfully with result: {result}")
            return result

        except Exception as e:
            eprint(f"Function {function_name} execution failed: {e}")
            raise

    # ===== FORMATTING =====

    async def a_format_class(self,
                             pydantic_model: type[BaseModel],
                             prompt: str,
                             message_context: list[dict] = None,
                             max_retries: int = 2, auto_context=True, session_id: str = None, **kwargs) -> dict[str, Any]:
        """
        State-of-the-art LLM-based structured data formatting using Pydantic models.

        Args:
            pydantic_model: The Pydantic model class to structure the response
            prompt: The main prompt for the LLM
            message_context: Optional conversation context messages
            max_retries: Maximum number of retry attempts
            auto_context: Whether the LLM call includes the agent's context
            session_id: Optional session to activate before formatting
            **kwargs: Accepted for interface compatibility; not used in the body

        Returns:
            dict: Validated structured data matching the Pydantic model

        Raises:
            ValidationError: If the LLM response cannot be validated against the model
            RuntimeError: If all retry attempts fail
        """

        if not LITELLM_AVAILABLE:
            raise RuntimeError("LiteLLM is required for structured formatting but not available")

        # Switch the active session when a different one is requested
        if session_id and self.active_session != session_id:
            self.active_session = session_id
        # Generate schema documentation
        schema = pydantic_model.model_json_schema()
        model_name = pydantic_model.__name__

        # Create enhanced prompt with schema
        # NOTE(review): the ```yaml example fence opened below is never closed
        # before "Respond in YAML format only:" — confirm the model tolerates it.
        enhanced_prompt = f"""
    {prompt}

    CRITICAL FORMATTING REQUIREMENTS:
    1. Respond ONLY in valid YAML format
    2. Follow the exact schema structure provided
    3. Use appropriate data types (strings, lists, numbers, booleans)
    4. Include ALL required fields
    5. No additional comments, explanations, or text outside the YAML

    SCHEMA FOR {model_name}:
    {yaml.dump(schema, default_flow_style=False, indent=2)}

    EXAMPLE OUTPUT FORMAT:
    ```yaml
    # Your response here following the schema exactly
    field_name: "value"
    list_field:
      - "item1"
      - "item2"
    boolean_field: true
    number_field: 42
Respond in YAML format only:
"""
        # Prepare messages
        messages = []
        if message_context:
            messages.extend(message_context)
        messages.append({"role": "user", "content": enhanced_prompt})

        # Retry logic with progressive adjustments
        last_error = None

        for attempt in range(max_retries + 1):
            try:
                # Adjust parameters based on attempt
                temperature = 0.1 + (attempt * 0.1)  # Increase temperature slightly on retries
                max_tokens = min(2000 + (attempt * 500), 4000)  # Increase token limit on retries

                rprint(f"[{model_name}] Attempt {attempt + 1}/{max_retries + 1} (temp: {temperature})")

                # Generate LLM response
                response = await self.a_run_llm_completion(
                    model=self.amd.complex_llm_model,
                    messages=messages,
                    with_context=auto_context,
                    temperature=temperature,
                    max_tokens=max_tokens,
                    task_id=f"format_{model_name.lower()}_{attempt}"
                )

                if not response or not response.strip():
                    raise ValueError("Empty response from LLM")

                # Extract YAML content with multiple fallback strategies
                yaml_content = self._extract_yaml_content(response)

                if not yaml_content:
                    raise ValueError("No valid YAML content found in response")

                # Parse YAML
                try:
                    parsed_data = yaml.safe_load(yaml_content)
                except yaml.YAMLError as e:
                    raise ValueError(f"Invalid YAML syntax: {e}")

                if not isinstance(parsed_data, dict):
                    raise ValueError(f"Expected dict, got {type(parsed_data)}")

                # Validate against Pydantic model
                try:
                    validated_instance = pydantic_model.model_validate(parsed_data)
                    validated_data = validated_instance.model_dump()

                    rprint(f"✅ Successfully formatted {model_name} on attempt {attempt + 1}")
                    return validated_data

                except ValidationError as e:
                    # Collect per-field errors so the retry prompt can name them
                    detailed_errors = []
                    for error in e.errors():
                        field_path = " -> ".join(str(x) for x in error['loc'])
                        detailed_errors.append(f"Field '{field_path}': {error['msg']}")

                    error_msg = "Validation failed:\n" + "\n".join(detailed_errors)
                    raise ValueError(error_msg)

            except Exception as e:
                last_error = e
                wprint(f"[{model_name}] Attempt {attempt + 1} failed: {str(e)}")

                if attempt < max_retries:
                    # Add error feedback for next attempt (replaces the last user
                    # message; feedback is NOT cumulative across retries)
                    error_feedback = f"\n\nPREVIOUS ATTEMPT FAILED: {str(e)}\nPlease correct the issues and provide valid YAML matching the schema exactly."
                    messages[-1]["content"] = enhanced_prompt + error_feedback

                    # Brief delay before retry
                    await asyncio.sleep(0.5 * (attempt + 1))
                else:
                    eprint(f"[{model_name}] All {max_retries + 1} attempts failed")

        # All attempts failed
        raise RuntimeError(f"Failed to format {model_name} after {max_retries + 1} attempts. Last error: {last_error}")

    def _extract_yaml_content(self, response: str) -> str:
        """Extract YAML content from LLM response with multiple strategies"""
        # Strategy 1: Extract from code blocks
        if "```yaml" in response:
            try:
                yaml_content = response.split("```yaml")[1].split("```")[0].strip()
                if yaml_content:
                    return yaml_content
            except IndexError:
                pass

        # Strategy 2: Extract from generic code blocks
        if "```" in response:
            try:
                parts = response.split("```")
                for i, part in enumerate(parts):
                    if i % 2 == 1:  # Odd indices are inside code blocks
                        # Skip if it starts with a language identifier
                        lines = part.strip().split('\n')
                        if lines and not lines[0].strip().isalpha():
                            return part.strip()
                        elif len(lines) > 1:
                            # Try without first line
                            return '\n'.join(lines[1:]).strip()
            except:
                pass

        # Strategy 3: Look for YAML-like patterns
        lines = response.split('\n')
        yaml_lines = []
        in_yaml = False

        for line in lines:
            stripped = line.strip()

            # Detect start of YAML-like content
            if ':' in stripped and not stripped.startswith('#'):
                in_yaml = True
                yaml_lines.append(line)
            elif in_yaml:
                if stripped == '' or stripped.startswith(' ') or stripped.startswith('-') or ':' in stripped:
                    yaml_lines.append(line)
                else:
                    # Potential end of YAML
                    break

        if yaml_lines:
            return '\n'.join(yaml_lines).strip()

        # Strategy 4: Return entire response if it looks like YAML
        if ':' in response and not response.strip().startswith('<'):
            return response.strip()

        return ""
    # ===== SERVER SETUP =====

    def setup_a2a_server(self, host: str = "0.0.0.0", port: int = 5000, **kwargs):
        """Create an A2A server exposing this agent for bidirectional communication."""
        if not A2A_AVAILABLE:
            wprint("A2A not available, cannot setup server")
            return

        try:
            agent_card = AgentCard(
                name=self.amd.name,
                description="Production-ready PocketFlow agent",
                version="1.0.0"
            )
            self.a2a_server = A2AServer(
                host=host,
                port=port,
                agent_card=agent_card,
                **kwargs
            )

            # Route incoming A2A requests to the agent's main entry point
            @self.a2a_server.route("/run")
            async def handle_run(request_data):
                incoming_query = request_data.get("query", "")
                incoming_session = request_data.get("session_id", "a2a_session")

                answer = await self.a_run(incoming_query, session_id=incoming_session)
                return {"response": answer}

            rprint(f"A2A server setup on {host}:{port}")

        except Exception as e:
            eprint(f"Failed to setup A2A server: {e}")

    def setup_mcp_server(self, host: str = "0.0.0.0", port: int = 8000, name: str = None, **kwargs):
        """Create a FastMCP server that exposes this agent as a single MCP tool."""
        if not MCP_AVAILABLE:
            wprint("MCP not available, cannot setup server")
            return

        try:
            server_name = name or f"{self.amd.name}_MCP"
            # NOTE(review): host/port/**kwargs are accepted but not forwarded
            # to FastMCP here - confirm whether they should be.
            self.mcp_server = FastMCP(server_name)

            # Expose the agent's run loop as a callable MCP tool
            @self.mcp_server.tool()
            async def agent_run(query: str, session_id: str = "mcp_session") -> str:
                """Execute agent with given query"""
                return await self.a_run(query, session_id=session_id)

            rprint(f"MCP server setup: {server_name}")

        except Exception as e:
            eprint(f"Failed to setup MCP server: {e}")

    # ===== LIFECYCLE MANAGEMENT =====

    async def start_servers(self):
        """Launch every configured server (A2A and/or MCP) concurrently.

        Does nothing when no server has been set up. Exceptions raised by an
        individual server are collected rather than propagated.
        """
        pending = []

        if self.a2a_server:
            pending.append(asyncio.create_task(self.a2a_server.start()))

        if self.mcp_server:
            pending.append(asyncio.create_task(self.mcp_server.run()))

        if not pending:
            return

        rprint(f"Starting {len(pending)} servers...")
        await asyncio.gather(*pending, return_exceptions=True)

    def clear_context(self, session_id: str = None) -> bool:
        """Clear conversation/execution context via the UnifiedContextManager.

        Args:
            session_id: Specific session to clear; when None, all sessions plus
                the shared execution state are reset.

        Returns:
            True on success, False if any step raised an exception.
        """
        try:
            # Clear via Context Manager
            if session_id:
                # Clear specific session
                if session_id in self.context_manager.session_managers:
                    session = self.context_manager.session_managers[session_id]
                    if hasattr(session, 'history'):
                        session.history = []
                    elif isinstance(session, dict) and 'history' in session:
                        session['history'] = []

                    # Remove from session managers
                    del self.context_manager.session_managers[session_id]

                    # Clear variable manager scope for this session
                    if self.variable_manager:
                        scope_name = f'session_{session_id}'
                        if scope_name in self.variable_manager.scopes:
                            del self.variable_manager.scopes[scope_name]

                    rprint(f"Context cleared for session: {session_id}")
            else:
                # Clear all sessions.
                # BUGFIX: use a distinct loop variable (`sid`) instead of
                # shadowing the `session_id` parameter; previously the
                # parameter was clobbered with the last iterated key, so the
                # cache invalidation below received a stale id instead of None.
                for sid, session in self.context_manager.session_managers.items():
                    if hasattr(session, 'history'):
                        session.history = []
                    elif isinstance(session, dict) and 'history' in session:
                        session['history'] = []

                self.context_manager.session_managers = {}
                rprint("Context cleared for all sessions")

            # Clear context cache (session-specific, or global when None)
            self.context_manager._invalidate_cache(session_id)

            # Clear current execution context in shared
            context_keys_to_clear = [
                "current_query", "current_response", "current_plan", "tasks",
                "results", "task_plans", "session_data", "formatted_context",
                "synthesized_response", "quality_assessment", "plan_adaptations",
                "executor_performance", "llm_tool_conversation", "aggregated_context"
            ]

            for key in context_keys_to_clear:
                if key in self.shared:
                    # Reset to an empty value of the same container type
                    if isinstance(self.shared[key], dict):
                        self.shared[key] = {}
                    elif isinstance(self.shared[key], list):
                        self.shared[key] = []
                    else:
                        self.shared[key] = None

            # Clear variable manager scopes (except core system variables)
            if hasattr(self, 'variable_manager'):
                # Clear user, results, tasks scopes
                self.variable_manager.register_scope('user', {})
                self.variable_manager.register_scope('results', {})
                self.variable_manager.register_scope('tasks', {})
                # Reset cache
                self.variable_manager._cache.clear()

            # Reset execution state
            self.is_running = False
            self.is_paused = False
            self.shared["system_status"] = "idle"

            # Clear progress tracking
            if hasattr(self, 'progress_tracker'):
                self.progress_tracker.reset_session_metrics()

            return True

        except Exception as e:
            eprint(f"Failed to clear context: {e}")
            return False

    async def clean_memory(self, deep_clean: bool = False) -> bool:
        """Clean memory and context of the agent.

        Args:
            deep_clean: When True, additionally drops the tool-analysis caches
                and removes the on-disk tool analysis file.

        Returns:
            True on success, False if an exception occurred.
        """
        try:
            # Clear current context first
            self.clear_context()

            # Clean world model
            self.shared["world_model"] = {}
            self.world_model = {}

            # Clean performance metrics
            self.shared["performance_metrics"] = {}

            # Deep clean session storage
            session_managers = self.shared.get("session_managers", {})
            if session_managers:
                for _manager_name, manager in session_managers.items():
                    if hasattr(manager, 'clear_all_history'):
                        await manager.clear_all_history()
                    elif hasattr(manager, 'clear_history'):
                        manager.clear_history()

            # Clear session managers entirely
            self.shared["session_managers"] = {}
            self.shared["session_initialized"] = False

            # Clean variable manager completely
            if hasattr(self, 'variable_manager'):
                # Reinitialize with clean state
                self.variable_manager = VariableManager({}, self.shared)
                self._setup_variable_scopes()

            # Clean tool analysis cache if deep clean
            if deep_clean:
                self._tool_capabilities = {}
                self._tool_analysis_cache = {}

                # Remove tool analysis file
                if hasattr(self, 'tool_analysis_file') and os.path.exists(self.tool_analysis_file):
                    # BUGFIX: bare `except:` narrowed to OSError - removal is
                    # best-effort, but unrelated exceptions should surface.
                    try:
                        os.remove(self.tool_analysis_file)
                        rprint("Removed tool analysis cache file")
                    except OSError:
                        pass

            # Clean checkpoint data
            self.checkpoint_data = {}
            self.last_checkpoint = None

            # Clean execution history
            if hasattr(self.task_flow, 'executor_node'):
                self.task_flow.executor_node.execution_history = []
                self.task_flow.executor_node.results_store = {}

            # Clean context manager sessions
            if hasattr(self.task_flow, 'context_manager'):
                self.task_flow.context_manager.session_managers = {}

            # Clean LLM call statistics
            self.shared.pop("llm_call_stats", None)

            # Force garbage collection
            import gc
            gc.collect()

            rprint(f"Memory cleaned (deep_clean: {deep_clean})")
            return True

        except Exception as e:
            eprint(f"Failed to clean memory: {e}")
            return False

    async def close(self):
        """Gracefully shut the agent down: checkpoint, executor, then servers."""
        self.is_running = False
        self._shutdown_event.set()

        # Persist state for later resume before anything is torn down
        if self.enable_pause_resume:
            final_checkpoint = await self._create_checkpoint()
            await self._save_checkpoint(final_checkpoint, "final_checkpoint.pkl")

        # Drain the thread pool before closing network-facing servers
        self.executor.shutdown(wait=True)

        for server in (self.a2a_server, self.mcp_server):
            if server:
                await server.close()

        if hasattr(self, '_mcp_session_manager'):
            await self._mcp_session_manager.cleanup_all()

        rprint("Agent shutdown complete")

    @property
    def total_cost(self) -> float:
        """Total accumulated cost, or 0.0 when no budget manager is attached."""
        manager = getattr(self.amd, 'budget_manager', None)
        if manager:
            return getattr(manager, 'total_cost', 0.0)
        return 0.0

    def status(self, pretty_print: bool = False) -> dict[str, Any] | str:
        """Get comprehensive agent status with optional pretty printing.

        Builds a nested dict covering agent identity, runtime state, task
        execution counters, conversation info, capabilities, memory/context
        state, performance, servers and configuration - all sourced from
        ``self.shared`` and related subsystems.

        Args:
            pretty_print: When True, render the status via
                EnhancedVerboseOutput and return a confirmation string
                (falls back to a JSON dump if pretty printing fails).

        Returns:
            The status dict, or a string when ``pretty_print`` is True.
        """

        # Core status information
        base_status = {
            "agent_info": {
                "name": self.amd.name,
                "version": "2.0",
                "type": "FlowAgent"
            },
            "runtime_status": {
                "status": self.shared.get("system_status", "idle"),
                "is_running": self.is_running,
                "is_paused": self.is_paused,
                "uptime_seconds": (datetime.now() - getattr(self, '_start_time', datetime.now())).total_seconds()
            },
            "task_execution": {
                "total_tasks": len(self.shared.get("tasks", {})),
                "active_tasks": len([t for t in self.shared.get("tasks", {}).values() if t.status == "running"]),
                "completed_tasks": len([t for t in self.shared.get("tasks", {}).values() if t.status == "completed"]),
                "failed_tasks": len([t for t in self.shared.get("tasks", {}).values() if t.status == "failed"]),
                "plan_adaptations": self.shared.get("plan_adaptations", 0)
            },
            "conversation": {
                "turns": len(self.shared.get("conversation_history", [])),
                "session_id": self.shared.get("session_id", self.active_session),
                "current_user": self.shared.get("user_id"),
                "last_query": self.shared.get("current_query", "")[:100] + "..." if len(
                    self.shared.get("current_query", "")) > 100 else self.shared.get("current_query", "")
            },
            "capabilities": {
                "available_tools": len(self.shared.get("available_tools", [])),
                "tool_names": list(self.shared.get("available_tools", [])),
                "analyzed_tools": len(self._tool_capabilities),
                "world_model_size": len(self.shared.get("world_model", {})),
                "intelligence_level": "high" if self._tool_capabilities else "basic"
            },
            "memory_context": {
                "session_initialized": self.shared.get("session_initialized", False),
                "session_managers": len(self.shared.get("session_managers", {})),
                "context_system": "advanced_session_aware" if self.shared.get("session_initialized") else "basic",
                "variable_scopes": len(self.variable_manager.get_scope_info()) if hasattr(self,
                                                                                          'variable_manager') else 0
            },
            "performance": {
                "total_cost": self.total_cost,
                "checkpoint_enabled": self.enable_pause_resume,
                "last_checkpoint": self.last_checkpoint.isoformat() if self.last_checkpoint else None,
                "max_parallel_tasks": self.max_parallel_tasks
            },
            "servers": {
                "a2a_server": self.a2a_server is not None,
                "mcp_server": self.mcp_server is not None,
                "server_count": sum([self.a2a_server is not None, self.mcp_server is not None])
            },
            "configuration": {
                "fast_llm_model": self.amd.fast_llm_model,
                "complex_llm_model": self.amd.complex_llm_model,
                "use_fast_response": getattr(self.amd, 'use_fast_response', False),
                "max_input_tokens": getattr(self.amd, 'max_input_tokens', 8000),
                "persona_configured": self.amd.persona is not None,
                "format_config": bool(getattr(self.amd.persona, 'format_config', None)) if self.amd.persona else False
            }
        }

        # Add detailed execution summary if tasks exist
        tasks = self.shared.get("tasks", {})
        if tasks:
            task_types_used = {}
            tools_used = []
            execution_timeline = []

            for task_id, task in tasks.items():
                # Count task types
                task_type = getattr(task, 'type', 'unknown')
                task_types_used[task_type] = task_types_used.get(task_type, 0) + 1

                # Collect tools used
                if hasattr(task, 'tool_name') and task.tool_name:
                    tools_used.append(task.tool_name)

                # Timeline info
                if hasattr(task, 'started_at') and task.started_at:
                    timeline_entry = {
                        "task_id": task_id,
                        "type": task_type,
                        "started": task.started_at.isoformat(),
                        "status": getattr(task, 'status', 'unknown')
                    }
                    if hasattr(task, 'completed_at') and task.completed_at:
                        timeline_entry["completed"] = task.completed_at.isoformat()
                        timeline_entry["duration"] = (task.completed_at - task.started_at).total_seconds()
                    execution_timeline.append(timeline_entry)

            base_status["task_execution"].update({
                "task_types_used": task_types_used,
                "tools_used": list(set(tools_used)),
                "execution_timeline": execution_timeline[-5:]  # Last 5 tasks
            })

        # Add context statistics
        if hasattr(self.task_flow, 'context_manager'):
            context_manager = self.task_flow.context_manager
            base_status["memory_context"].update({
                "compression_threshold": context_manager.compression_threshold,
                "max_tokens": context_manager.max_tokens,
                "active_context_sessions": len(getattr(context_manager, 'session_managers', {}))
            })

        # Add variable system info
        if hasattr(self, 'variable_manager'):
            available_vars = self.variable_manager.get_available_variables()
            scope_info = self.variable_manager.get_scope_info()

            base_status["variable_system"] = {
                "total_scopes": len(scope_info),
                "scope_names": list(scope_info.keys()),
                "total_variables": sum(len(vars) for vars in available_vars.values()),
                "scope_details": {
                    scope: {"type": info["type"], "variables": len(available_vars.get(scope, {}))}
                    for scope, info in scope_info.items()
                }
            }

        # Add format quality info if available
        quality_assessment = self.shared.get("quality_assessment", {})
        if quality_assessment:
            quality_details = quality_assessment.get("quality_details", {})
            base_status["format_quality"] = {
                "overall_score": quality_details.get("total_score", 0.0),
                "format_adherence": quality_details.get("format_adherence", 0.0),
                "length_adherence": quality_details.get("length_adherence", 0.0),
                "content_quality": quality_details.get("base_quality", 0.0),
                "assessment": quality_assessment.get("quality_assessment", "unknown"),
                "has_suggestions": bool(quality_assessment.get("suggestions", []))
            }

        # Add LLM usage statistics
        llm_stats = self.shared.get("llm_call_stats", {})
        if llm_stats:
            base_status["llm_usage"] = {
                "total_calls": llm_stats.get("total_calls", 0),
                "context_compression_rate": llm_stats.get("context_compression_rate", 0.0),
                "average_context_tokens": llm_stats.get("context_tokens_used", 0) / max(llm_stats.get("total_calls", 1),
                                                                                        1),
                "total_tokens_used": llm_stats.get("total_tokens_used", 0)
            }

        # Add timestamp
        base_status["timestamp"] = datetime.now().isoformat()

        if not pretty_print:
            return base_status

        # Pretty print using EnhancedVerboseOutput
        try:
            from toolboxv2.mods.isaa.extras.verbose_output import EnhancedVerboseOutput
            verbose_output = EnhancedVerboseOutput(verbose=True)

            # Header
            verbose_output.log_header(f"Agent Status: {base_status['agent_info']['name']}")

            # Runtime Status
            status_color = {
                "running": "SUCCESS",
                "paused": "WARNING",
                "idle": "INFO",
                "error": "ERROR"
            }.get(base_status["runtime_status"]["status"], "INFO")

            # Dispatches to print_success / print_warning / print_info /
            # print_error depending on the mapped status color.
            getattr(verbose_output, f"print_{status_color.lower()}")(
                f"Status: {base_status['runtime_status']['status'].upper()}"
            )

            # Task Execution Summary
            task_exec = base_status["task_execution"]
            if task_exec["total_tasks"] > 0:
                verbose_output.formatter.print_section(
                    "Task Execution",
                    f"Total: {task_exec['total_tasks']} | "
                    f"Completed: {task_exec['completed_tasks']} | "
                    f"Failed: {task_exec['failed_tasks']} | "
                    f"Active: {task_exec['active_tasks']}\n"
                    f"Adaptations: {task_exec['plan_adaptations']}"
                )

                if task_exec.get("tools_used"):
                    verbose_output.formatter.print_section(
                        "Tools Used",
                        ", ".join(task_exec["tools_used"])
                    )

            # Capabilities
            caps = base_status["capabilities"]
            verbose_output.formatter.print_section(
                "Capabilities",
                f"Intelligence Level: {caps['intelligence_level']}\n"
                f"Available Tools: {caps['available_tools']}\n"
                f"Analyzed Tools: {caps['analyzed_tools']}\n"
                f"World Model Size: {caps['world_model_size']}"
            )

            # Memory & Context
            memory = base_status["memory_context"]
            verbose_output.formatter.print_section(
                "Memory & Context",
                f"Context System: {memory['context_system']}\n"
                f"Session Managers: {memory['session_managers']}\n"
                f"Variable Scopes: {memory['variable_scopes']}\n"
                f"Session Initialized: {memory['session_initialized']}"
            )

            # Configuration
            config = base_status["configuration"]
            verbose_output.formatter.print_section(
                "Configuration",
                f"Fast LLM: {config['fast_llm_model']}\n"
                f"Complex LLM: {config['complex_llm_model']}\n"
                f"Max Tokens: {config['max_input_tokens']}\n"
                f"Persona: {'Configured' if config['persona_configured'] else 'Default'}\n"
                f"Format Config: {'Active' if config['format_config'] else 'None'}"
            )

            # Performance
            perf = base_status["performance"]
            verbose_output.formatter.print_section(
                "Performance",
                f"Total Cost: ${perf['total_cost']:.4f}\n"
                f"Checkpointing: {'Enabled' if perf['checkpoint_enabled'] else 'Disabled'}\n"
                f"Max Parallel Tasks: {perf['max_parallel_tasks']}\n"
                f"Last Checkpoint: {perf['last_checkpoint'] or 'None'}"
            )

            # Variable System Details
            if "variable_system" in base_status:
                var_sys = base_status["variable_system"]
                scope_details = []
                for scope, details in var_sys["scope_details"].items():
                    scope_details.append(f"{scope}: {details['variables']} variables ({details['type']})")

                verbose_output.formatter.print_section(
                    "Variable System",
                    f"Total Scopes: {var_sys['total_scopes']}\n"
                    f"Total Variables: {var_sys['total_variables']}\n" +
                    "\n".join(scope_details)
                )

            # Format Quality
            if "format_quality" in base_status:
                quality = base_status["format_quality"]
                verbose_output.formatter.print_section(
                    "Format Quality",
                    f"Overall Score: {quality['overall_score']:.2f}\n"
                    f"Format Adherence: {quality['format_adherence']:.2f}\n"
                    f"Length Adherence: {quality['length_adherence']:.2f}\n"
                    f"Content Quality: {quality['content_quality']:.2f}\n"
                    f"Assessment: {quality['assessment']}"
                )

            # LLM Usage
            if "llm_usage" in base_status:
                llm = base_status["llm_usage"]
                verbose_output.formatter.print_section(
                    "LLM Usage Statistics",
                    f"Total Calls: {llm['total_calls']}\n"
                    f"Avg Context Tokens: {llm['average_context_tokens']:.1f}\n"
                    f"Total Tokens: {llm['total_tokens_used']}\n"
                    f"Compression Rate: {llm['context_compression_rate']:.2%}"
                )

            # Servers
            servers = base_status["servers"]
            if servers["server_count"] > 0:
                server_status = []
                if servers["a2a_server"]:
                    server_status.append("A2A Server: Active")
                if servers["mcp_server"]:
                    server_status.append("MCP Server: Active")

                verbose_output.formatter.print_section(
                    "Servers",
                    "\n".join(server_status)
                )

            verbose_output.print_separator()
            verbose_output.print_info(f"Status generated at: {base_status['timestamp']}")

            return "Status printed above"

        except Exception:
            # Fallback to JSON if pretty print fails
            import json
            return json.dumps(base_status, indent=2, default=str)

    @property
    def tool_registry(self):
        """Read-only access to the agent's underlying tool registry."""
        registry = self._tool_registry
        return registry

    def __rshift__(self, other):
        """Support `agent >> other` by promoting this agent into a Chain."""
        sequential = Chain(self)
        return sequential >> other

    def __add__(self, other):
        """Support `agent + other` by promoting this agent into a Chain."""
        combined = Chain(self)
        return combined + other

    def __and__(self, other):
        """Support `agent & other` by promoting this agent into a Chain."""
        parallel = Chain(self)
        return parallel & other

    def __mod__(self, other):
        """Implements the % operator for conditional branching."""
        branch = ConditionalChain(self, other)
        return branch
total_cost property

Get total cost if budget manager available

__mod__(other)

Implements % operator for conditional branching

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
10803
10804
10805
def __mod__(self, other):
    """Implements % operator for conditional branching"""
    return ConditionalChain(self, other)
a_format_class(pydantic_model, prompt, message_context=None, max_retries=2, auto_context=True, session_id=None, **kwargs) async

State-of-the-art LLM-based structured data formatting using Pydantic models.

Parameters:

Name Type Description Default
pydantic_model type[BaseModel]

The Pydantic model class to structure the response

required
prompt str

The main prompt for the LLM

required
message_context list[dict]

Optional conversation context messages

None
max_retries int

Maximum number of retry attempts

2

Returns:

Name Type Description
dict dict[str, Any]

Validated structured data matching the Pydantic model

Raises:

Type Description
ValidationError

If the LLM response cannot be validated against the model

RuntimeError

If all retry attempts fail

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
10054
10055
10056
10057
10058
10059
10060
10061
10062
10063
10064
10065
10066
10067
10068
10069
10070
10071
10072
10073
10074
10075
10076
10077
10078
10079
10080
10081
10082
10083
10084
10085
10086
10087
10088
10089
10090
10091
10092
10093
10094
10095
10096
10097
10098
10099
10100
10101
10102
10103
10104
10105
10106
10107
10108
10109
10110
10111
10112
10113
10114
10115
10116
10117
10118
10119
10120
10121
10122
10123
10124
10125
10126
10127
10128
10129
10130
10131
10132
10133
10134
10135
10136
10137
10138
10139
10140
10141
10142
10143
10144
10145
10146
10147
10148
10149
10150
10151
10152
10153
10154
10155
10156
10157
10158
10159
10160
10161
10162
10163
10164
10165
10166
10167
10168
10169
10170
10171
10172
10173
10174
10175
10176
10177
10178
10179
10180
10181
10182
10183
10184
10185
10186
10187
    async def a_format_class(self,
                             pydantic_model: type[BaseModel],
                             prompt: str,
                             message_context: list[dict] = None,
                             max_retries: int = 2, auto_context=True, session_id: str = None, **kwargs) -> dict[str, Any]:
        """
        State-of-the-art LLM-based structured data formatting using Pydantic models.

        Args:
            pydantic_model: The Pydantic model class to structure the response
            prompt: The main prompt for the LLM
            message_context: Optional conversation context messages
            max_retries: Maximum number of retry attempts

        Returns:
            dict: Validated structured data matching the Pydantic model

        Raises:
            ValidationError: If the LLM response cannot be validated against the model
            RuntimeError: If all retry attempts fail
        """

        if not LITELLM_AVAILABLE:
            raise RuntimeError("LiteLLM is required for structured formatting but not available")

        if session_id and self.active_session != session_id:
            self.active_session = session_id
        # Generate schema documentation
        schema = pydantic_model.model_json_schema()
        model_name = pydantic_model.__name__

        # Create enhanced prompt with schema
        enhanced_prompt = f"""
    {prompt}

    CRITICAL FORMATTING REQUIREMENTS:
    1. Respond ONLY in valid YAML format
    2. Follow the exact schema structure provided
    3. Use appropriate data types (strings, lists, numbers, booleans)
    4. Include ALL required fields
    5. No additional comments, explanations, or text outside the YAML

    SCHEMA FOR {model_name}:
    {yaml.dump(schema, default_flow_style=False, indent=2)}

    EXAMPLE OUTPUT FORMAT:
    ```yaml
    # Your response here following the schema exactly
    field_name: "value"
    list_field:
      - "item1"
      - "item2"
    boolean_field: true
    number_field: 42
Respond in YAML format only:
"""
        # Prepare messages
        messages = []
        if message_context:
            messages.extend(message_context)
        messages.append({"role": "user", "content": enhanced_prompt})

        # Retry logic with progressive adjustments
        last_error = None

        for attempt in range(max_retries + 1):
            try:
                # Adjust parameters based on attempt
                temperature = 0.1 + (attempt * 0.1)  # Increase temperature slightly on retries
                max_tokens = min(2000 + (attempt * 500), 4000)  # Increase token limit on retries

                rprint(f"[{model_name}] Attempt {attempt + 1}/{max_retries + 1} (temp: {temperature})")

                # Generate LLM response
                response = await self.a_run_llm_completion(
                    model=self.amd.complex_llm_model,
                    messages=messages,
                    with_context=auto_context,
                    temperature=temperature,
                    max_tokens=max_tokens,
                    task_id=f"format_{model_name.lower()}_{attempt}"
                )

                if not response or not response.strip():
                    raise ValueError("Empty response from LLM")

                # Extract YAML content with multiple fallback strategies
                yaml_content = self._extract_yaml_content(response)

                if not yaml_content:
                    raise ValueError("No valid YAML content found in response")

                # Parse YAML
                try:
                    parsed_data = yaml.safe_load(yaml_content)
                except yaml.YAMLError as e:
                    raise ValueError(f"Invalid YAML syntax: {e}")

                if not isinstance(parsed_data, dict):
                    raise ValueError(f"Expected dict, got {type(parsed_data)}")

                # Validate against Pydantic model
                try:
                    validated_instance = pydantic_model.model_validate(parsed_data)
                    validated_data = validated_instance.model_dump()

                    rprint(f"✅ Successfully formatted {model_name} on attempt {attempt + 1}")
                    return validated_data

                except ValidationError as e:
                    detailed_errors = []
                    for error in e.errors():
                        field_path = " -> ".join(str(x) for x in error['loc'])
                        detailed_errors.append(f"Field '{field_path}': {error['msg']}")

                    error_msg = "Validation failed:\n" + "\n".join(detailed_errors)
                    raise ValueError(error_msg)

            except Exception as e:
                last_error = e
                wprint(f"[{model_name}] Attempt {attempt + 1} failed: {str(e)}")

                if attempt < max_retries:
                    # Add error feedback for next attempt
                    error_feedback = f"\n\nPREVIOUS ATTEMPT FAILED: {str(e)}\nPlease correct the issues and provide valid YAML matching the schema exactly."
                    messages[-1]["content"] = enhanced_prompt + error_feedback

                    # Brief delay before retry
                    await asyncio.sleep(0.5 * (attempt + 1))
                else:
                    eprint(f"[{model_name}] All {max_retries + 1} attempts failed")

        # All attempts failed
        raise RuntimeError(f"Failed to format {model_name} after {max_retries + 1} attempts. Last error: {last_error}")
a_run(query, session_id='default', user_id=None, stream_callback=None, remember=True, **kwargs) async

Main entry point für Agent-Ausführung mit UnifiedContextManager

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
8212
8213
8214
8215
8216
8217
8218
8219
8220
8221
8222
8223
8224
8225
8226
8227
8228
8229
8230
8231
8232
8233
8234
8235
8236
8237
8238
8239
8240
8241
8242
8243
8244
8245
8246
8247
8248
8249
8250
8251
8252
8253
8254
8255
8256
8257
8258
8259
8260
8261
8262
8263
8264
8265
8266
8267
8268
8269
8270
8271
8272
8273
8274
8275
8276
8277
8278
8279
8280
8281
8282
8283
8284
8285
8286
8287
8288
8289
8290
8291
8292
8293
8294
8295
8296
8297
8298
8299
8300
8301
8302
8303
8304
8305
8306
8307
8308
8309
8310
8311
8312
8313
8314
8315
8316
8317
8318
8319
8320
8321
8322
8323
8324
8325
8326
8327
8328
8329
8330
8331
8332
8333
8334
8335
8336
8337
8338
8339
8340
8341
8342
8343
8344
8345
8346
8347
8348
8349
8350
8351
8352
8353
8354
8355
8356
8357
8358
8359
async def a_run(
    self,
    query: str,
    session_id: str = "default",
    user_id: str = None,
    stream_callback: Callable = None,
    remember: bool = True,
    **kwargs
) -> str:
    """Main entry point for agent execution via the UnifiedContextManager.

    Args:
        query: The user request to execute.
        session_id: Conversation session to run under.
        user_id: Optional identifier of the calling user.
        stream_callback: Optional callable used for streaming partial output.
        remember: When True, the user query and the assistant response are
            stored in the session's ChatSession history.
        **kwargs: Extra options carried through to the orchestration flow.

    Returns:
        The final response string, or an "I encountered an error: ..." string
        when orchestration raises (errors are reported, not re-raised).
    """

    # assumes start_timer returns a time.time()-based timestamp (used below
    # as `time.time() - execution_start`) — TODO confirm
    execution_start = self.progress_tracker.start_timer("total_execution")
    self.active_session = session_id
    result = None
    await self.progress_tracker.emit_event(ProgressEvent(
        event_type="execution_start",
        timestamp=time.time(),
        status=NodeStatus.RUNNING,
        node_name="FlowAgent",
        session_id=session_id,
        metadata={"query": query, "user_id": user_id}
    ))

    try:
        # Initialize or fetch the session via the UnifiedContextManager.
        await self.initialize_session_context(session_id, max_history=200)

        # Store the user message immediately in the ChatSession when remember=True.
        if remember:
            await self.context_manager.add_interaction(
                session_id,
                'user',
                query,
                metadata={"user_id": user_id}
            )

        # Set user context variables
        timestamp = datetime.now()
        self.variable_manager.register_scope('user', {
            'id': user_id,
            'session': session_id,
            'query': query,
            'timestamp': timestamp.isoformat()
        })

        # Update system variables
        self.variable_manager.set('system_context.timestamp', {'isoformat': timestamp.isoformat()})
        self.variable_manager.set('system_context.current_session', session_id)
        self.variable_manager.set('system_context.current_user', user_id)
        self.variable_manager.set('system_context.last_query', query)

        # Initialize with tool awareness
        await self.initialize_context_awareness()

        # SIMPLIFIED: prepare execution context — duplicate less data.
        self.shared.update({
            "current_query": query,
            "session_id": session_id,
            "user_id": user_id,
            "stream_callback": stream_callback,
            "remember": remember,
            # CENTRAL: the context manager is the primary context source.
            "context_manager": self.context_manager,
            "variable_manager": self.variable_manager
        })

        # Set LLM models in shared context
        self.shared['fast_llm_model'] = self.amd.fast_llm_model
        self.shared['complex_llm_model'] = self.amd.complex_llm_model
        self.shared['persona_config'] = self.amd.persona
        self.shared['use_fast_response'] = self.amd.use_fast_response

        # Set system status
        self.shared["system_status"] = "running"
        self.is_running = True

        # Execute main orchestration flow
        result = await self._orchestrate_execution()

        # Store the assistant response in the ChatSession when remember=True.
        if remember:
            await self.context_manager.add_interaction(
                session_id,
                'assistant',
                result,
                metadata={"user_id": user_id, "execution_duration": time.time() - execution_start}
            )

        total_duration = self.progress_tracker.end_timer("total_execution")

        await self.progress_tracker.emit_event(ProgressEvent(
            event_type="execution_complete",
            timestamp=time.time(),
            node_name="FlowAgent",
            status=NodeStatus.COMPLETED,
            node_duration=total_duration,
            session_id=session_id,
            metadata={
                "result_length": len(result),
                "summary": self.progress_tracker.get_summary(),
                "remembered": remember
            }
        ))

        # Checkpoint if needed
        if self.enable_pause_resume:
            await self._maybe_checkpoint()

        return result

    except Exception as e:
        # NOTE(review): assumes eprint accepts an `exc_info` keyword — confirm.
        eprint(f"Agent execution failed: {e}", exc_info=True)
        error_response = f"I encountered an error: {str(e)}"
        result = error_response
        import traceback
        print(traceback.format_exc())

        # Store the error response in the ChatSession when remember=True.
        if remember:
            await self.context_manager.add_interaction(
                session_id,
                'assistant',
                error_response,
                metadata={
                    "user_id": user_id,
                    "error": True,
                    "error_type": type(e).__name__
                }
            )

        total_duration = self.progress_tracker.end_timer("total_execution")

        await self.progress_tracker.emit_event(ProgressEvent(
            event_type="error",
            timestamp=time.time(),
            node_name="FlowAgent",
            status=NodeStatus.FAILED,
            node_duration=total_duration,
            session_id=session_id,
            metadata={"error": str(e), "error_type": type(e).__name__}
        ))

        return error_response

    finally:
        # Always leave the agent in an idle, session-free state.
        self.shared["system_status"] = "idle"
        self.is_running = False
        self.active_session = None
a_run_with_format(query, response_format='frei-text', text_length='chat-conversation', custom_instructions='', **kwargs) async

Run the agent with a specific output format

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
8465
8466
8467
8468
8469
8470
8471
8472
8473
8474
8475
8476
8477
8478
8479
8480
8481
8482
8483
8484
8485
async def a_run_with_format(
    self,
    query: str,
    response_format: str = "frei-text",
    text_length: str = "chat-conversation",
    custom_instructions: str = "",
    **kwargs
) -> str:
    """Run the agent with a temporarily applied response format.

    The persona that was active before the call is restored afterwards,
    even when execution raises.
    """
    saved_persona = self.amd.persona

    try:
        self.set_response_format(response_format, text_length, custom_instructions)
        return await self.a_run(query, **kwargs)
    finally:
        # Undo the temporary format change regardless of outcome.
        self.amd.persona = saved_persona
        self.shared["persona_config"] = saved_persona
add_custom_flow(flow, name)

Add a custom flow for dynamic execution

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
 9997
 9998
 9999
10000
def add_custom_flow(self, flow: AsyncFlow, name: str):
    """Register a custom flow as a callable tool for dynamic execution.

    Args:
        flow: The AsyncFlow whose ``run_async`` entry point is exposed.
        name: Tool name under which the flow is registered.
    """
    # BUG FIX: add_tool is a coroutine function; calling it without awaiting
    # dropped the coroutine and the tool was never registered. Schedule it on
    # the running loop, or drive it to completion when no loop is active.
    registration = self.add_tool(
        flow.run_async, name=name, description=f"Custom flow: {flow.__class__.__name__}"
    )
    try:
        asyncio.get_running_loop().create_task(registration)
    except RuntimeError:
        asyncio.run(registration)
    rprint(f"Custom node added: {name}")
add_first_class_tool(tool_func, name, description)

Add a first-class meta-tool that can be used by the LLMReasonerNode. These are different from regular tools - they control agent sub-systems.

Parameters:

Name Type Description Default
tool_func Callable

The function to register as a meta-tool

required
name str

Name of the meta-tool

required
description str

Description of when and how to use it

required
Source code in toolboxv2/mods/isaa/base/Agent/agent.py
9776
9777
9778
9779
9780
9781
9782
9783
9784
9785
9786
9787
9788
9789
9790
9791
9792
9793
9794
9795
9796
9797
9798
9799
9800
9801
9802
9803
9804
def add_first_class_tool(self, tool_func: Callable, name: str, description: str):
    """
    Register a first-class meta-tool for use by the LLMReasonerNode.

    Meta-tools differ from regular tools: they control agent sub-systems.

    Args:
        tool_func: The function to register as a meta-tool
        name: Name of the meta-tool
        description: Description of when and how to use it
    """
    # Reject anything that cannot actually be invoked.
    if not callable(tool_func):
        raise ValueError("Tool function must be callable")

    # Guard clause: without a reasoner there is nowhere to register.
    if not hasattr(self.task_flow, 'llm_reasoner'):
        wprint("LLMReasonerNode not available for first-class tool registration")
        return

    reasoner = self.task_flow.llm_reasoner
    if not hasattr(reasoner, 'meta_tools_registry'):
        reasoner.meta_tools_registry = {}

    reasoner.meta_tools_registry[name] = {
        "function": tool_func,
        "description": description,
        "added_at": datetime.now().isoformat()
    }

    rprint(f"First-class meta-tool added: {name}")
add_tool(tool_func, name=None, description=None, is_new=False) async

Enhanced tool addition with intelligent analysis

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
9806
9807
9808
9809
9810
9811
9812
9813
9814
9815
9816
9817
9818
9819
9820
9821
9822
9823
9824
9825
9826
9827
9828
9829
9830
9831
9832
9833
9834
9835
async def add_tool(self, tool_func: Callable, name: str = None, description: str = None, is_new=False):
    """Register a tool, wrapping sync callables and optionally analyzing them."""
    if asyncio.iscoroutinefunction(tool_func):
        effective_func = tool_func
    else:
        # Wrap synchronous callables so every registered tool is awaitable.
        @wraps(tool_func)
        async def async_wrapper(*args, **kwargs):
            return await asyncio.to_thread(tool_func, *args, **kwargs)

        effective_func = async_wrapper

    tool_name = name or effective_func.__name__
    tool_description = description or effective_func.__doc__ or "No description"

    # Record the tool alongside its argument schema for later introspection.
    self._tool_registry[tool_name] = {
        "function": effective_func,
        "description": tool_description,
        "args_schema": get_args_schema(tool_func)
    }

    # Expose the tool name to the rest of the system exactly once.
    available = self.shared["available_tools"]
    if tool_name not in available:
        available.append(tool_name)

    # Newly introduced tools get an intelligent capability analysis.
    if is_new:
        await self._analyze_tool_capabilities(tool_name, tool_description)

    rprint(f"Tool added with analysis: {tool_name}")
arun_function(function_name, *args, **kwargs) async

Asynchronously finds a function by its string name, executes it with the given arguments, and returns the result.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
10006
10007
10008
10009
10010
10011
10012
10013
10014
10015
10016
10017
10018
10019
10020
10021
10022
10023
10024
10025
10026
10027
10028
10029
10030
10031
10032
10033
10034
10035
10036
10037
10038
10039
10040
10041
10042
10043
10044
10045
10046
10047
10048
10049
10050
async def arun_function(self, function_name: str, *args, **kwargs) -> Any:
    """
    Asynchronously finds a function by its string name, executes it with
    the given arguments, and returns the result.
    """
    rprint(f"Attempting to run function: {function_name} with args: {args}, kwargs: {kwargs}")
    target_function = self.get_tool_by_name(function_name)

    start_time = time.perf_counter()
    if not target_function:
        raise ValueError(f"Function '{function_name}' not found in the {self.amd.name}'s registered tools.")

    try:
        if not asyncio.iscoroutinefunction(target_function):
            # Synchronous tools run in the default thread pool.
            running_loop = asyncio.get_running_loop()
            result = await running_loop.run_in_executor(
                None, lambda: target_function(*args, **kwargs)
            )
        else:
            result = await target_function(*args, **kwargs)

        # Some wrappers hand back a coroutine; resolve it fully.
        if asyncio.iscoroutine(result):
            result = await result

        if self.progress_tracker:
            elapsed = time.perf_counter() - start_time
            await self.progress_tracker.emit_event(ProgressEvent(
                event_type="tool_call",  # unified as tool_call
                node_name="FlowAgent",
                status=NodeStatus.COMPLETED,
                success=True,
                duration=elapsed,
                tool_name=function_name,
                tool_args=kwargs,
                tool_result=result,
                is_meta_tool=False,  # explicitly mark as a regular tool call
                metadata={
                    "result_type": type(result).__name__,
                    "result_length": len(str(result))
                }
            ))
        rprint(f"Function {function_name} completed successfully with result: {result}")
        return result

    except Exception as e:
        eprint(f"Function {function_name} execution failed: {e}")
        raise
clean_memory(deep_clean=False) async

Clean memory and context of the agent

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
10392
10393
10394
10395
10396
10397
10398
10399
10400
10401
10402
10403
10404
10405
10406
10407
10408
10409
10410
10411
10412
10413
10414
10415
10416
10417
10418
10419
10420
10421
10422
10423
10424
10425
10426
10427
10428
10429
10430
10431
10432
10433
10434
10435
10436
10437
10438
10439
10440
10441
10442
10443
10444
10445
10446
10447
10448
10449
10450
10451
10452
10453
10454
10455
10456
10457
10458
10459
10460
10461
10462
async def clean_memory(self, deep_clean: bool = False) -> bool:
    """Clean memory and context of the agent.

    Args:
        deep_clean: When True, also drop the tool-capability caches and
            remove the on-disk tool analysis file.

    Returns:
        True on success, False if any step raised.
    """
    try:
        # Clear current context first
        self.clear_context()

        # Clean world model
        self.shared["world_model"] = {}
        self.world_model = {}

        # Clean performance metrics
        self.shared["performance_metrics"] = {}

        # Deep clean session storage
        session_managers = self.shared.get("session_managers", {})
        if session_managers:
            for _manager_name, manager in session_managers.items():
                if hasattr(manager, 'clear_all_history'):
                    await manager.clear_all_history()
                elif hasattr(manager, 'clear_history'):
                    manager.clear_history()

        # Clear session managers entirely
        self.shared["session_managers"] = {}
        self.shared["session_initialized"] = False

        # Clean variable manager completely
        if hasattr(self, 'variable_manager'):
            # Reinitialize with clean state
            self.variable_manager = VariableManager({}, self.shared)
            self._setup_variable_scopes()

        # Clean tool analysis cache if deep clean
        if deep_clean:
            self._tool_capabilities = {}
            self._tool_analysis_cache = {}

            # Remove tool analysis file (best-effort)
            if hasattr(self, 'tool_analysis_file') and os.path.exists(self.tool_analysis_file):
                try:
                    os.remove(self.tool_analysis_file)
                    rprint("Removed tool analysis cache file")
                except OSError:
                    # BUG FIX: previously a bare `except:` which also swallowed
                    # KeyboardInterrupt/SystemExit; only ignore filesystem errors.
                    pass

        # Clean checkpoint data
        self.checkpoint_data = {}
        self.last_checkpoint = None

        # Clean execution history
        if hasattr(self.task_flow, 'executor_node'):
            self.task_flow.executor_node.execution_history = []
            self.task_flow.executor_node.results_store = {}

        # Clean context manager sessions
        if hasattr(self.task_flow, 'context_manager'):
            self.task_flow.context_manager.session_managers = {}

        # Clean LLM call statistics
        self.shared.pop("llm_call_stats", None)

        # Force garbage collection
        import gc
        gc.collect()

        rprint(f"Memory cleaned (deep_clean: {deep_clean})")
        return True

    except Exception as e:
        eprint(f"Failed to clean memory: {e}")
        return False
cleanup_session_context(session_id=None, keep_count=100, remove_old_snapshots=True) async

Cleanup session context by removing old snapshots and entries

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
9480
9481
9482
9483
9484
9485
9486
9487
9488
9489
9490
9491
9492
9493
9494
9495
9496
9497
9498
9499
9500
9501
9502
9503
9504
9505
9506
9507
9508
9509
9510
9511
9512
9513
9514
9515
9516
9517
9518
9519
9520
9521
9522
9523
9524
9525
9526
9527
9528
9529
9530
9531
9532
9533
9534
9535
9536
9537
9538
9539
9540
9541
9542
9543
9544
9545
9546
9547
9548
9549
9550
9551
9552
9553
async def cleanup_session_context(self, session_id: str = None, keep_count: int = 100,
                                  remove_old_snapshots: bool = True) -> dict[str, Any]:
    """Cleanup session context by removing old snapshots and entries.

    Args:
        session_id: Session to clean; defaults to the shared session id.
        keep_count: Number of most recent regular messages to retain.
        remove_old_snapshots: When True, all context snapshots are dropped;
            otherwise the five most recent snapshots are kept.

    Returns:
        A statistics dict describing the cleanup, or an ``{"error": ...}``
        dict when the session is missing or an exception occurs.
    """
    try:
        session_id = session_id or self.shared.get("session_id", "default")

        if not self.context_manager:
            return {"error": "Context manager not available"}

        session = self.context_manager.session_managers.get(session_id)
        if not session or not hasattr(session, 'history'):
            return {"error": f"Session {session_id} not found or has no history"}

        cleanup_stats = {
            "original_message_count": len(session.history),
            "context_snapshots_removed": 0,
            "context_entries_removed": 0,
            "regular_messages_kept": 0,
            "cleanup_performed": False
        }

        # Nothing to do if the history already fits within keep_count.
        if len(session.history) <= keep_count:
            return {**cleanup_stats, "message": "No cleanup needed"}

        # Separate different types of messages
        regular_messages = []
        context_snapshots = []
        context_entries = []

        for message in session.history:
            metadata = message.get("metadata", {})

            if metadata.get("is_context_snapshot"):
                context_snapshots.append(message)
            elif metadata.get("is_context_entry"):
                context_entries.append(message)
            else:
                regular_messages.append(message)

        # Keep most recent regular messages
        messages_to_keep = regular_messages[-keep_count:]
        cleanup_stats["regular_messages_kept"] = len(messages_to_keep)

        # Keep most recent context snapshots (if not removing)
        if not remove_old_snapshots:
            recent_snapshots = context_snapshots[-5:]  # Keep last 5 snapshots
            messages_to_keep.extend(recent_snapshots)
        else:
            cleanup_stats["context_snapshots_removed"] = len(context_snapshots)

        # Keep persistent context entries
        # NOTE(review): reads the top-level "persistent" key, while the
        # snapshot/entry type flags above live under message["metadata"] —
        # confirm the flag's location is intentional.
        persistent_entries = [
            entry for entry in context_entries
            if entry.get("persistent", True)
        ]
        messages_to_keep.extend(persistent_entries)
        cleanup_stats["context_entries_removed"] = len(context_entries) - len(persistent_entries)

        # Sort by timestamp and update session
        messages_to_keep.sort(key=lambda x: x.get("timestamp", ""))
        session.history = messages_to_keep

        cleanup_stats.update({
            "final_message_count": len(session.history),
            "cleanup_performed": True,
            "messages_removed": cleanup_stats["original_message_count"] - len(session.history)
        })

        rprint(f"Session cleanup completed: {cleanup_stats['messages_removed']} messages removed")
        return cleanup_stats

    except Exception as e:
        eprint(f"Failed to cleanup session context: {e}")
        return {"error": str(e)}
clear_context(session_id=None)

Clear context via the UnifiedContextManager, with session-specific support

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
10314
10315
10316
10317
10318
10319
10320
10321
10322
10323
10324
10325
10326
10327
10328
10329
10330
10331
10332
10333
10334
10335
10336
10337
10338
10339
10340
10341
10342
10343
10344
10345
10346
10347
10348
10349
10350
10351
10352
10353
10354
10355
10356
10357
10358
10359
10360
10361
10362
10363
10364
10365
10366
10367
10368
10369
10370
10371
10372
10373
10374
10375
10376
10377
10378
10379
10380
10381
10382
10383
10384
10385
10386
10387
10388
10389
10390
def clear_context(self, session_id: str = None) -> bool:
    """Clear context via the UnifiedContextManager, per session or globally.

    Args:
        session_id: When given, only that session is cleared; otherwise all
            sessions plus the shared execution state are reset.

    Returns:
        True on success, False if any step raised.
    """
    try:
        if session_id:
            # Clear one specific session.
            if session_id in self.context_manager.session_managers:
                session = self.context_manager.session_managers[session_id]
                if hasattr(session, 'history'):
                    session.history = []
                elif isinstance(session, dict) and 'history' in session:
                    session['history'] = []

                # Remove from session managers
                del self.context_manager.session_managers[session_id]

                # Clear variable manager scope for this session
                if self.variable_manager:
                    scope_name = f'session_{session_id}'
                    if scope_name in self.variable_manager.scopes:
                        del self.variable_manager.scopes[scope_name]

                rprint(f"Context cleared for session: {session_id}")
        else:
            # BUG FIX: the loop previously reused the name `session_id`,
            # shadowing the parameter; _invalidate_cache() below then received
            # the last iterated id instead of None (= invalidate everything).
            for _sid, session in self.context_manager.session_managers.items():
                if hasattr(session, 'history'):
                    session.history = []
                elif isinstance(session, dict) and 'history' in session:
                    session['history'] = []

            self.context_manager.session_managers = {}
            rprint("Context cleared for all sessions")

        # Clear context cache (None invalidates every session's cache).
        self.context_manager._invalidate_cache(session_id)

        # Clear current execution context in shared
        context_keys_to_clear = [
            "current_query", "current_response", "current_plan", "tasks",
            "results", "task_plans", "session_data", "formatted_context",
            "synthesized_response", "quality_assessment", "plan_adaptations",
            "executor_performance", "llm_tool_conversation", "aggregated_context"
        ]

        # Reset each key to an empty value of its current type.
        for key in context_keys_to_clear:
            if key in self.shared:
                if isinstance(self.shared[key], dict):
                    self.shared[key] = {}
                elif isinstance(self.shared[key], list):
                    self.shared[key] = []
                else:
                    self.shared[key] = None

        # Clear variable manager scopes (except core system variables)
        if hasattr(self, 'variable_manager'):
            # Clear user, results, tasks scopes
            self.variable_manager.register_scope('user', {})
            self.variable_manager.register_scope('results', {})
            self.variable_manager.register_scope('tasks', {})
            # Reset cache
            self.variable_manager._cache.clear()

        # Reset execution state
        self.is_running = False
        self.is_paused = False
        self.shared["system_status"] = "idle"

        # Clear progress tracking
        if hasattr(self, 'progress_tracker'):
            self.progress_tracker.reset_session_metrics()

        return True

    except Exception as e:
        eprint(f"Failed to clear context: {e}")
        return False
close() async

Clean shutdown

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
10464
10465
10466
10467
10468
10469
10470
10471
10472
10473
10474
10475
10476
10477
10478
10479
10480
10481
10482
10483
10484
10485
10486
10487
async def close(self):
    """Shut the agent down cleanly: checkpoint, stop workers, close servers."""
    self.is_running = False
    self._shutdown_event.set()

    # Persist a last checkpoint so state survives the shutdown.
    if self.enable_pause_resume:
        final_state = await self._create_checkpoint()
        await self._save_checkpoint(final_state, "final_checkpoint.pkl")

    # Wait for in-flight executor work before tearing down.
    self.executor.shutdown(wait=True)

    # Close protocol servers that were started, if any.
    for server in (self.a2a_server, self.mcp_server):
        if server:
            await server.close()

    if hasattr(self, '_mcp_session_manager'):
        await self._mcp_session_manager.cleanup_all()

    rprint("Agent shutdown complete")
configure_persona_integration(apply_method='system_prompt', integration_level='light')

Configure how persona is applied

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
8777
8778
8779
8780
8781
8782
8783
8784
def configure_persona_integration(self, apply_method: str = "system_prompt", integration_level: str = "light"):
    """Configure how the persona is applied to responses."""
    persona = self.amd.persona
    # Guard clause: nothing to configure without a persona.
    if not persona:
        wprint("No persona configured to update")
        return
    persona.apply_method = apply_method
    persona.integration_level = integration_level
    rprint(f"Persona integration updated: {apply_method}, {integration_level}")
delete_old_checkpoints(keep_count=5, max_age_hours=168) async

Delete old checkpoints, keeping the most recent ones

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
9699
9700
9701
9702
9703
9704
9705
9706
9707
9708
9709
9710
9711
9712
9713
9714
9715
9716
9717
9718
9719
9720
9721
9722
9723
9724
9725
9726
9727
9728
9729
9730
9731
9732
9733
9734
9735
9736
9737
9738
9739
9740
9741
9742
9743
9744
9745
9746
9747
9748
9749
9750
9751
9752
9753
9754
9755
9756
async def delete_old_checkpoints(self, keep_count: int = 5, max_age_hours: int = 168) -> dict[str, Any]:
    """Delete old checkpoints, keeping the most recent ones.

    Final checkpoints are never deleted. Removal happens in two passes:
    everything beyond the newest ``keep_count``, then anything older than
    ``max_age_hours``.

    Args:
        keep_count: Number of newest checkpoints to retain.
        max_age_hours: Age threshold beyond which checkpoints are removed.

    Returns:
        Summary dict with ``success``, ``deleted_count``, ``freed_space_kb``,
        ``remaining_checkpoints`` and ``errors``.
    """
    try:
        # Look further back than the age limit so aged files are visible.
        checkpoints = self.list_available_checkpoints(
            max_age_hours=max_age_hours * 2)

        deleted_count = 0
        deleted_size_kb = 0
        errors = []
        # FIX: track processed file paths in a set instead of re-slicing the
        # list and testing dict membership per iteration; this also prevents
        # double-delete attempts reliably.
        processed_paths = set()

        def _remove(checkpoint) -> bool:
            """Delete one checkpoint file, updating counters; True on success."""
            nonlocal deleted_count, deleted_size_kb
            processed_paths.add(checkpoint["filepath"])
            try:
                os.remove(checkpoint["filepath"])
                deleted_count += 1
                deleted_size_kb += checkpoint["file_size_kb"]
                return True
            except Exception as e:
                import traceback
                print(traceback.format_exc())
                errors.append(f"Failed to delete {checkpoint['filename']}: {e}")
                return False

        # Pass 1: keep the newest keep_count; never delete the final checkpoint.
        if len(checkpoints) > keep_count:
            for checkpoint in checkpoints[keep_count:]:
                if checkpoint["checkpoint_type"] != "final":
                    if _remove(checkpoint):
                        rprint(f"Deleted old checkpoint: {checkpoint['filename']}")

        # Pass 2: remove checkpoints older than max_age_hours (skip already-processed).
        for checkpoint in checkpoints:
            if (checkpoint["age_hours"] > max_age_hours
                    and checkpoint["checkpoint_type"] != "final"
                    and checkpoint["filepath"] not in processed_paths):
                if _remove(checkpoint):
                    rprint(f"Deleted aged checkpoint: {checkpoint['filename']}")

        return {
            "success": True,
            "deleted_count": deleted_count,
            "freed_space_kb": round(deleted_size_kb, 1),
            "remaining_checkpoints": len(checkpoints) - deleted_count,
            "errors": errors
        }

    except Exception as e:
        import traceback
        print(traceback.format_exc())
        eprint(f"Failed to delete old checkpoints: {e}")
        return {
            "success": False,
            "error": str(e),
            "deleted_count": 0
        }
explain_reasoning_process() async

Erkläre den Reasoning-Prozess des Agenten

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
8926
8927
8928
8929
8930
8931
8932
8933
8934
8935
8936
8937
8938
8939
8940
8941
8942
8943
8944
8945
8946
8947
8948
8949
8950
8951
8952
8953
8954
8955
8956
8957
8958
8959
8960
8961
8962
8963
8964
8965
8966
8967
8968
8969
8970
    async def explain_reasoning_process(self) -> str:
        """Explain the agent's reasoning process in human-readable form.

        Builds a German-language prompt from the task execution summary and
        asks the complex LLM model to describe strategy, task decomposition,
        adaptations and key findings.

        Returns:
            The LLM's explanation, or a fallback/error string.
        """
        if not LITELLM_AVAILABLE:
            return "Reasoning explanation requires LLM capabilities."

        summary = await self.get_task_execution_summary()

        # The prompt is deliberately German runtime text; do not translate.
        prompt = f"""
Erkläre den Reasoning-Prozess dieses AI-Agenten in verständlicher Form:

## Ausführungszusammenfassung
- Total Tasks: {summary['total_tasks']}
- Erfolgreich: {len(summary['completed_tasks'])}
- Fehlgeschlagen: {len(summary['failed_tasks'])}
- Plan-Adaptationen: {summary['adaptations']}
- Verwendete Tools: {', '.join(set(summary['tools_used']))}
- Task-Typen: {summary['task_types_used']}

## Task-Details
Erfolgreiche Tasks:
{self._format_tasks_for_explanation(summary['completed_tasks'])}

## Anweisungen
Erkläre in 2-3 Absätzen:
1. Welche Strategie der Agent gewählt hat
2. Wie er die Aufgabe in Tasks unterteilt hat
3. Wie er auf unerwartete Ergebnisse reagiert hat (falls Adaptationen)
4. Was die wichtigsten Erkenntnisse waren

Schreibe für einen technischen Nutzer, aber verständlich."""

        try:
            # Use the complex model: explanation quality matters more than speed.
            response = await self.a_run_llm_completion(
                model=self.amd.complex_llm_model,
                messages=[{"role": "user", "content": prompt}],
                temperature=0.5,
                max_tokens=800,task_id="reasoning_explanation"
            )

            return response

        except Exception as e:
            import traceback
            print(traceback.format_exc())
            return f"Could not generate reasoning explanation: {e}"
format_text(text, **context)

Format text with variables

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
8569
8570
8571
def format_text(self, text: str, **context) -> str:
    """Render *text* through the variable manager, merging in **context."""
    rendered = self.variable_manager.format_text(text, context)
    return rendered
get_available_formats()

Erhalte verfügbare Format- und Längen-Optionen

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
8450
8451
8452
8453
8454
8455
8456
8457
8458
8459
8460
8461
8462
8463
def get_available_formats(self) -> dict[str, list[str]]:
    """Return the available response-format and text-length options."""
    # Enumerate the option values once, then derive the description maps.
    format_values = [fmt.value for fmt in ResponseFormat]
    length_values = [length.value for length in TextLength]
    return {
        "formats": format_values,
        "lengths": length_values,
        "format_descriptions": {
            fmt.value: FormatConfig(response_format=fmt).get_format_instructions()
            for fmt in ResponseFormat
        },
        "length_descriptions": {
            length.value: FormatConfig(text_length=length).get_length_instructions()
            for length in TextLength
        }
    }
get_available_variables()

Get available variables for dynamic formatting

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
8786
8787
8788
def get_available_variables(self) -> dict[str, str]:
    """Expose the variable manager's variables for dynamic formatting."""
    variables = self.variable_manager.get_available_variables()
    return variables
get_context(session_id=None, format_for_llm=True) async

REVISED: Get context via the UnifiedContextManager instead of scattered sources

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
8685
8686
8687
8688
8689
8690
8691
8692
8693
8694
8695
8696
8697
8698
8699
8700
8701
8702
8703
8704
8705
8706
8707
8708
8709
8710
8711
8712
8713
8714
8715
async def get_context(self, session_id: str = None, format_for_llm: bool = True) -> str | dict[str, Any]:
    """
    Build context via the UnifiedContextManager instead of scattered sources.
    """
    try:
        sid = session_id or self.shared.get("session_id", self.active_session)
        current_query = self.shared.get("current_query", "")

        # Unified context is assembled centrally by the context manager.
        unified_context = await self.context_manager.build_unified_context(sid, current_query, "full")

        if not format_for_llm:
            return unified_context
        return self.context_manager.get_formatted_context_for_llm(unified_context)

    except Exception as e:
        import traceback
        print(traceback.format_exc())
        eprint(f"Failed to generate context via UnifiedContextManager: {e}")

        # FALLBACK: degrade gracefully when the unified build fails.
        if format_for_llm:
            return f"Error generating context: {str(e)}"
        return {
            "error": str(e),
            "generated_at": datetime.now().isoformat(),
            "fallback_mode": True
        }
get_context_statistics()

Get comprehensive context management statistics

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
8717
8718
8719
8720
8721
8722
8723
8724
8725
8726
8727
8728
8729
8730
8731
8732
8733
8734
8735
8736
8737
8738
8739
8740
8741
8742
8743
8744
8745
8746
8747
8748
8749
8750
8751
8752
8753
8754
8755
8756
def get_context_statistics(self) -> dict[str, Any]:
    """Collect context-management statistics: session managers, compression figures, and LLM usage."""
    report: dict[str, Any] = {
        "context_system": "advanced_session_aware",
        "compression_threshold": 0.76,
        "max_tokens": getattr(self, 'max_input_tokens', 8000),
        "session_managers": {},
        "context_usage": {},
        "compression_stats": {}
    }

    # Per-session manager snapshot
    for name, manager in self.shared.get("session_managers", {}).items():
        report["session_managers"][name] = {
            "history_length": len(manager.history),
            "max_length": manager.max_length,
            "space_name": manager.space_name
        }

    # Compression figures from the task flow's context manager, when present
    if hasattr(self.task_flow, 'context_manager'):
        cm = self.task_flow.context_manager
        report["compression_stats"] = {
            "compression_threshold": cm.compression_threshold,
            "max_tokens": cm.max_tokens,
            "active_sessions": len(cm.session_managers)
        }

    # Aggregate LLM call statistics, if any were recorded
    llm_stats = self.shared.get("llm_call_stats", {})
    if llm_stats:
        report["context_usage"] = {
            "total_llm_calls": llm_stats.get("total_calls", 0),
            "context_compression_rate": llm_stats.get("context_compression_rate", 0.0),
            # guard against division by zero when no calls were recorded
            "average_context_tokens": llm_stats.get("context_tokens_used", 0) / max(llm_stats.get("total_calls", 1), 1)
        }

    return report
get_format_quality_report()

Erhalte detaillierten Format-Qualitätsbericht

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
8487
8488
8489
8490
8491
8492
8493
8494
8495
8496
8497
8498
8499
8500
8501
8502
8503
8504
8505
def get_format_quality_report(self) -> dict[str, Any]:
    """Summarize the most recent response-format quality assessment, if one exists."""
    assessment = self.shared.get("quality_assessment", {})

    if not assessment:
        # Nothing has been assessed yet
        return {"status": "no_assessment", "message": "No recent quality assessment available"}

    details = assessment.get("quality_details", {})

    report = {
        "overall_score": details.get("total_score", 0.0),
        "format_adherence": details.get("format_adherence", 0.0),
        "length_adherence": details.get("length_adherence", 0.0),
        "content_quality": details.get("base_quality", 0.0),
        "llm_assessment": details.get("llm_assessment", 0.0),
        "suggestions": assessment.get("suggestions", []),
        "assessment": assessment.get("quality_assessment", "unknown"),
        "format_config_active": details.get("format_config_used", False)
    }
    return report
get_session_storage_stats()

Get comprehensive session storage statistics

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
9555
9556
9557
9558
9559
9560
9561
9562
9563
9564
9565
9566
9567
9568
9569
9570
9571
9572
9573
9574
9575
9576
9577
9578
9579
9580
9581
9582
9583
9584
9585
9586
9587
9588
9589
9590
9591
9592
9593
9594
9595
9596
9597
9598
9599
9600
9601
9602
9603
9604
9605
9606
9607
9608
9609
9610
9611
9612
9613
9614
9615
9616
9617
9618
9619
9620
9621
9622
def get_session_storage_stats(self) -> dict[str, Any]:
    """Aggregate message and storage statistics across all sessions held by the context manager."""
    try:
        totals = {
            "total_messages": 0,
            "context_snapshots": 0,
            "context_entries": 0,
            "regular_messages": 0
        }
        stats: dict[str, Any] = {
            "context_manager_active": bool(self.context_manager),
            "total_sessions": 0,
            "session_details": {},
            "storage_summary": totals
        }

        # Without a context manager there is nothing to inspect
        if not self.context_manager:
            return stats

        managers = self.context_manager.session_managers
        stats["total_sessions"] = len(managers)

        for sid, session in managers.items():
            detail = {
                "session_type": "chatsession" if hasattr(session, 'history') else "fallback",
                "message_count": 0,
                "context_snapshots": 0,
                "context_entries": 0,
                "regular_messages": 0,
                "storage_size_estimate": 0
            }

            if hasattr(session, 'history'):
                # Full ChatSession: classify each message via its metadata flags
                detail["message_count"] = len(session.history)
                for message in session.history:
                    detail["storage_size_estimate"] += len(str(message))
                    meta = message.get("metadata", {})
                    if meta.get("is_context_snapshot"):
                        detail["context_snapshots"] += 1
                    elif meta.get("is_context_entry"):
                        detail["context_entries"] += 1
                    else:
                        detail["regular_messages"] += 1
            elif isinstance(session, dict) and 'history' in session:
                # Fallback session shape: plain dict with a history list
                history = session['history']
                detail["message_count"] = len(history)
                detail["regular_messages"] = len(history)
                detail["storage_size_estimate"] = sum(len(str(m)) for m in history)

            stats["session_details"][sid] = detail

            totals["total_messages"] += detail["message_count"]
            totals["context_snapshots"] += detail["context_snapshots"]
            totals["context_entries"] += detail["context_entries"]
            totals["regular_messages"] += detail["regular_messages"]

        # Overall size estimate in KB across every session
        stats["storage_summary"]["estimated_total_size_kb"] = sum(
            d["storage_size_estimate"] for d in stats["session_details"].values()
        ) / 1024

        return stats

    except Exception as e:
        eprint(f"Failed to get session storage stats: {e}")
        return {"error": str(e)}
get_task_execution_summary() async

Erhalte detaillierte Zusammenfassung der Task-Ausführung

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
8884
8885
8886
8887
8888
8889
8890
8891
8892
8893
8894
8895
8896
8897
8898
8899
8900
8901
8902
8903
8904
8905
8906
8907
8908
8909
8910
8911
8912
8913
8914
8915
8916
8917
8918
8919
8920
8921
8922
8923
8924
async def get_task_execution_summary(self) -> dict[str, Any]:
    """Build a detailed execution summary: outcomes, durations, tool usage, and task-type counts."""
    tasks = self.shared.get("tasks", {})
    results_store = self.shared.get("results", {})

    summary: dict[str, Any] = {
        "total_tasks": len(tasks),
        "completed_tasks": [],
        "failed_tasks": [],
        "task_types_used": {},
        "tools_used": [],
        "adaptations": self.shared.get("plan_adaptations", 0),
        "execution_timeline": []
    }

    for task_id, task in tasks.items():
        entry = {
            "id": task_id,
            "type": task.type,
            "description": task.description,
            "status": task.status,
            "duration": None
        }

        # Duration is only computable when both timestamps exist
        if task.started_at and task.completed_at:
            entry["duration"] = (task.completed_at - task.started_at).total_seconds()

        if task.status == "completed":
            summary["completed_tasks"].append(entry)
            if isinstance(task, ToolTask):
                summary["tools_used"].append(task.tool_name)
        elif task.status == "failed":
            entry["error"] = task.error
            summary["failed_tasks"].append(entry)

        # Tally usage per task type
        counts = summary["task_types_used"]
        counts[task.type] = counts.get(task.type, 0) + 1

    return summary
get_tool_by_name(tool_name)

Get tool function by name

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
10002
10003
10004
def get_tool_by_name(self, tool_name: str) -> Callable | None:
    """Get tool function by name"""
    return self._tool_registry.get(tool_name, {}).get("function")
get_variable(path, default=None)

Get variable using unified system

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
8565
8566
8567
def get_variable(self, path: str, default=None):
    """Fetch a value by path from the unified variable system.

    Returns *default* when the path does not resolve.
    """
    vm = self.variable_manager
    return vm.get(path, default)
get_variable_documentation()

Get comprehensive variable system documentation

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
8507
8508
8509
8510
8511
8512
8513
8514
8515
8516
8517
8518
8519
8520
8521
8522
8523
8524
8525
8526
8527
8528
8529
8530
8531
8532
8533
8534
8535
8536
8537
def get_variable_documentation(self) -> str:
    """Render a markdown reference for the variable system: scopes, syntax, examples, and live variables."""
    lines: list[str] = ["# Variable System Documentation\n"]

    # Scope overview pulled from the variable manager
    lines.append("## Available Scopes:")
    for scope_name, info in self.variable_manager.get_scope_info().items():
        lines.append(f"- `{scope_name}`: {info['type']} with {info.get('keys', 'N/A')} keys")

    # Static syntax and usage reference
    lines += [
        "\n## Syntax Options:",
        "- `{{ variable.path }}` - Full path resolution",
        "- `{variable}` - Simple variable (no dots)",
        "- `$variable` - Shell-style variable",
        "\n## Example Usage:",
        "- `{{ results.task_1.data }}` - Get result from task_1",
        "- `{{ user.name }}` - Get user name",
        "- `{agent_name}` - Simple agent name",
        "- `$timestamp` - System timestamp",
        "\n## Available Variables:",
    ]

    # Live listing of every variable grouped by scope
    for scope_name, scope_vars in self.variable_manager.get_available_variables().items():
        lines.append(f"\n### {scope_name}:")
        for _name, var_info in scope_vars.items():
            lines.append(f"- `{var_info['path']}`: {var_info['preview']} ({var_info['type']})")

    return "\n".join(lines)
initialize_context_awareness() async

Enhanced context awareness with session management

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
8599
8600
8601
8602
8603
8604
8605
8606
8607
8608
8609
8610
8611
8612
8613
8614
8615
8616
8617
8618
8619
8620
8621
8622
8623
8624
8625
8626
8627
8628
8629
8630
8631
8632
8633
8634
8635
8636
8637
8638
8639
8640
8641
8642
8643
8644
8645
8646
8647
8648
8649
8650
8651
8652
8653
8654
8655
8656
8657
8658
8659
8660
8661
8662
8663
8664
8665
8666
8667
8668
8669
8670
8671
8672
8673
8674
8675
8676
8677
8678
8679
8680
8681
8682
8683
async def initialize_context_awareness(self) -> None:
    """Enhanced context awareness with session management.

    Steps: ensure a session is initialized, expose the LLM reasoner's
    internal executors as regular registered tools, drop capability-cache
    entries for tools that no longer exist, analyze capabilities for any
    tool lacking them, and publish a system-context summary into
    ``self.shared["system_context"]``.
    """

    # Initialize session if not already done; initialize_session_context
    # sets the "session_initialized" flag consulted here.
    session_id = self.shared.get("session_id", self.active_session)
    if not self.shared.get("session_initialized"):
        await self.initialize_session_context(session_id)

    # Ensure tool capabilities are loaded; tqdm renders a progress bar
    # for the per-tool analysis loop below.

    from tqdm import tqdm

    # Wrap the reasoner's private executors as public tools, registering
    # each only once (guarded by membership in available_tools).
    if hasattr(self.task_flow, 'llm_reasoner'):
        if "read_from_variables" not in self.shared["available_tools"] and hasattr(self.task_flow.llm_reasoner, '_execute_read_from_variables'):
            await self.add_tool(lambda scope, key, purpose: self.task_flow.llm_reasoner._execute_read_from_variables({"scope": scope, "key": key, "purpose": purpose}), "read_from_variables", "Read from variables")
        if "write_to_variables" not in self.shared["available_tools"] and hasattr(self.task_flow.llm_reasoner, '_execute_write_to_variables'):
            await self.add_tool(lambda scope, key, value, description: self.task_flow.llm_reasoner._execute_write_to_variables({"scope": scope, "key": key, "value": value, "description": description}), "write_to_variables", "Write to variables")

        if "internal_reasoning" not in self.shared["available_tools"] and hasattr(self.task_flow.llm_reasoner, '_execute_internal_reasoning'):
            async def internal_reasoning_tool(thought:str, thought_number:int, total_thoughts:int, next_thought_needed:bool, current_focus:str, key_insights:list[str], potential_issues:list[str], confidence_level:float):
                # Re-pack keyword-style parameters into the dict the executor expects
                args = {
                    "thought": thought,
                    "thought_number": thought_number,
                    "total_thoughts": total_thoughts,
                    "next_thought_needed": next_thought_needed,
                    "current_focus": current_focus,
                    "key_insights": key_insights,
                    "potential_issues": potential_issues,
                    "confidence_level": confidence_level
                }
                return await self.task_flow.llm_reasoner._execute_internal_reasoning(args, self.shared)
            await self.add_tool(internal_reasoning_tool, "internal_reasoning", "Internal reasoning")

        if "manage_internal_task_stack" not in self.shared["available_tools"] and hasattr(self.task_flow.llm_reasoner, '_execute_manage_task_stack'):
            async def manage_internal_task_stack_tool(action:str, task_description:str, outline_step_ref:str):
                args = {
                    "action": action,
                    "task_description": task_description,
                    "outline_step_ref": outline_step_ref
                }
                return await self.task_flow.llm_reasoner._execute_manage_task_stack(args, self.shared)
            await self.add_tool(manage_internal_task_stack_tool, "manage_internal_task_stack", "Manage internal task stack")

        if "outline_step_completion" not in self.shared["available_tools"] and hasattr(self.task_flow.llm_reasoner, '_execute_outline_step_completion'):
            async def outline_step_completion_tool(step_completed:bool, completion_evidence:str, next_step_focus:str):
                args = {
                    "step_completed": step_completed,
                    "completion_evidence": completion_evidence,
                    "next_step_focus": next_step_focus
                }
                return await self.task_flow.llm_reasoner._execute_outline_step_completion(args, self.shared)
            await self.add_tool(outline_step_completion_tool, "outline_step_completion", "Outline step completion")


    # Drop cached capability entries for tools no longer registered.
    registered_tools = set(self._tool_registry.keys())
    cached_capabilities = list(self._tool_capabilities.keys())  # copy keys: entries are deleted from the dict below
    for tool_name in cached_capabilities:
        if tool_name in self._tool_capabilities and tool_name not in registered_tools:
            del self._tool_capabilities[tool_name]
            print(f"Removed outdated capability for unavailable tool: {tool_name}")

    # Analyze tools missing capability data, then refresh each tool's
    # argument schema from its current function signature.
    for tool_name in tqdm(self.shared["available_tools"], desc=f"Agent {self.amd.name} Analyzing Tools", unit="tool", colour="green", total=len(self.shared["available_tools"])):
        if tool_name not in self._tool_capabilities:
            tool_info = self._tool_registry.get(tool_name, {})
            description = tool_info.get("description", "No description")
            with Spinner(f"Analyzing tool {tool_name}"):
                await self._analyze_tool_capabilities(tool_name, description, tool_info.get("args_schema", "()"))

        if tool_name in self._tool_capabilities:
            function = self._tool_registry[tool_name]["function"]
            self._tool_capabilities[tool_name]["args_schema"] = get_args_schema(function)

    # Set enhanced system context summary for downstream consumers
    self.shared["system_context"] = {
        "capabilities_summary": self._build_capabilities_summary(),
        "tool_count": len(self.shared["available_tools"]),
        "analysis_loaded": len(self._tool_capabilities),
        "intelligence_level": "high" if self._tool_capabilities else "basic",
        "context_management": "advanced_session_aware",
        "session_managers": len(self.shared.get("session_managers", {})),
    }


    rprint("Advanced context awareness initialized with session management")
initialize_session_context(session_id='default', max_history=200) async

Vereinfachte Session-Initialisierung über UnifiedContextManager

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
8573
8574
8575
8576
8577
8578
8579
8580
8581
8582
8583
8584
8585
8586
8587
8588
8589
8590
8591
8592
8593
8594
8595
8596
8597
async def initialize_session_context(self, session_id: str = "default", max_history: int = 200) -> bool:
    """Initialize session state by delegating to the UnifiedContextManager.

    Returns:
        True on success, False when initialization fails.
    """
    try:
        # The context manager owns session creation
        await self.context_manager.initialize_session(session_id, max_history)

        # Wire the variable manager into the context manager if it lacks one
        if not self.context_manager.variable_manager:
            self.context_manager.variable_manager = self.variable_manager

        # Minimal shared-state bookkeeping; canonical data lives in the context manager
        self.shared["active_session_id"] = session_id
        self.shared["session_initialized"] = True

        # Backward compatibility: mirror session managers into shared state
        self.shared["session_managers"] = self.context_manager.session_managers

        rprint(f"Session context initialized for {session_id} via UnifiedContextManager")
        return True

    except Exception as e:
        eprint(f"Session context initialization failed: {e}")
        import traceback
        print(traceback.format_exc())
        return False
list_available_checkpoints(max_age_hours=168)

List all available checkpoints with metadata

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
9624
9625
9626
9627
9628
9629
9630
9631
9632
9633
9634
9635
9636
9637
9638
9639
9640
9641
9642
9643
9644
9645
9646
9647
9648
9649
9650
9651
9652
9653
9654
9655
9656
9657
9658
9659
9660
9661
9662
9663
9664
9665
9666
9667
9668
9669
9670
9671
9672
9673
9674
9675
9676
9677
9678
9679
9680
9681
9682
9683
9684
9685
9686
9687
9688
9689
9690
9691
9692
9693
9694
9695
9696
9697
def list_available_checkpoints(self, max_age_hours: int = 168) -> list[dict[str, Any]]:  # Default 1 week
    """List this agent's available checkpoints with metadata, newest first.

    Scans the agent's checkpoint directory for ``agent_checkpoint_*.pkl``
    files, skips those older than *max_age_hours*, and returns one
    descriptor per checkpoint: path, type ("regular"/"final"), timestamp,
    age, file size, and a best-effort peek into the pickled contents.

    Args:
        max_age_hours: Maximum age (in hours) for a checkpoint to be
            included. Defaults to 168 (one week).

    Returns:
        Checkpoint descriptor dicts sorted newest-first; empty list when
        the directory does not exist or listing fails entirely.
    """
    try:
        from toolboxv2 import get_app
        folder = str(get_app().data_dir) + '/Agents/checkpoint/' + self.amd.name

        if not os.path.exists(folder):
            return []

        checkpoints = []
        for file in os.listdir(folder):
            if file.endswith('.pkl') and file.startswith('agent_checkpoint_'):
                filepath = os.path.join(folder, file)
                try:
                    # Filesystem-level facts about the checkpoint file
                    file_stat = os.stat(filepath)
                    file_size = file_stat.st_size
                    modified_time = datetime.fromtimestamp(file_stat.st_mtime)

                    # Timestamp is encoded in the filename; the final
                    # checkpoint has none, so fall back to the file mtime.
                    timestamp_str = file.replace('agent_checkpoint_', '').replace('.pkl', '')
                    if timestamp_str == 'final_checkpoint':
                        checkpoint_time = modified_time
                        checkpoint_type = "final"
                    else:
                        checkpoint_time = datetime.strptime(timestamp_str, "%Y%m%d_%H%M%S")
                        checkpoint_type = "regular"

                    # Age filter
                    age_hours = (datetime.now() - checkpoint_time).total_seconds() / 3600
                    if age_hours <= max_age_hours:

                        # Best-effort metadata peek. NOTE: this unpickles the
                        # full checkpoint just to read counts; acceptable for
                        # a listing, but only safe on trusted local files.
                        metadata = {}
                        try:
                            with open(filepath, 'rb') as f:
                                checkpoint = pickle.load(f)
                            metadata = {
                                "tasks_count": len(checkpoint.task_state) if checkpoint.task_state else 0,
                                "world_model_entries": len(checkpoint.world_model) if checkpoint.world_model else 0,
                                "session_id": checkpoint.metadata.get("session_id", "unknown") if hasattr(
                                    checkpoint, 'metadata') and checkpoint.metadata else "unknown",
                                "last_query": checkpoint.metadata.get("last_query", "unknown")[:100] if hasattr(
                                    checkpoint, 'metadata') and checkpoint.metadata else "unknown"
                            }
                        except Exception:
                            # Was a bare `except:` — keep swallowing corrupt
                            # files but let SystemExit/KeyboardInterrupt escape.
                            metadata = {"load_error": True}

                        checkpoints.append({
                            "filepath": filepath,
                            "filename": file,
                            "checkpoint_type": checkpoint_type,
                            "timestamp": checkpoint_time.isoformat(),
                            "age_hours": round(age_hours, 1),
                            "file_size_kb": round(file_size / 1024, 1),
                            "metadata": metadata
                        })

                except Exception as e:
                    import traceback
                    print(traceback.format_exc())
                    wprint(f"Could not analyze checkpoint file {file}: {e}")
                    continue

        # Sort by timestamp (newest first)
        checkpoints.sort(key=lambda x: x["timestamp"], reverse=True)

        return checkpoints

    except Exception as e:
        import traceback
        print(traceback.format_exc())
        eprint(f"Failed to list checkpoints: {e}")
        return []
load_context_from_session(session_id, context_type='full') async

Load context from ChatSession storage

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
9435
9436
9437
9438
9439
9440
9441
9442
9443
9444
9445
9446
9447
9448
9449
9450
9451
9452
9453
9454
9455
9456
9457
9458
9459
9460
9461
9462
9463
9464
9465
9466
9467
9468
9469
9470
9471
9472
9473
9474
9475
9476
9477
9478
async def load_context_from_session(self, session_id: str, context_type: str = "full") -> dict[str, Any]:
    """Restore the most recent context snapshot of *context_type* stored in a session's history."""
    try:
        if not self.context_manager:
            return {"error": "Context manager not available"}

        session = self.context_manager.session_managers.get(session_id)
        if not session:
            return {"error": f"Session {session_id} not found"}

        marker = f"[CONTEXT_SNAPSHOT_{context_type.upper()}]"
        snapshots = []

        if hasattr(session, 'history'):
            # Walk newest-to-oldest looking for matching system snapshot messages
            for message in reversed(session.history):
                meta = message.get("metadata", {})
                if message.get("role") != "system":
                    continue
                if not meta.get("is_context_snapshot") or meta.get("context_type") != context_type:
                    continue

                try:
                    content = message.get("content", "")
                    if content.startswith(marker):
                        payload = json.loads(content.replace(marker + " ", ""))
                        snapshots.append({
                            "context": payload,
                            "timestamp": message.get("timestamp"),
                            "metadata": meta
                        })
                except Exception as e:
                    wprint(f"Failed to parse context snapshot: {e}")

        if snapshots:
            # First entry is the newest because we iterate in reverse
            newest = snapshots[0]
            rprint(f"Loaded context snapshot from session {session_id} (timestamp: {newest['timestamp']})")
            return newest["context"]
        return {"error": f"No context snapshots of type '{context_type}' found in session {session_id}"}

    except Exception as e:
        eprint(f"Failed to load context from session: {e}")
        return {"error": str(e)}
load_latest_checkpoint(auto_restore_history=True, max_age_hours=24) async

Vereinfachtes Checkpoint-Laden mit automatischer History-Wiederherstellung

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
9155
9156
9157
9158
9159
9160
9161
9162
9163
9164
9165
9166
9167
9168
9169
9170
9171
9172
9173
9174
9175
9176
9177
9178
9179
9180
9181
9182
9183
9184
9185
9186
9187
9188
9189
9190
9191
9192
9193
9194
9195
9196
9197
9198
9199
9200
9201
9202
9203
9204
9205
9206
9207
9208
9209
9210
9211
9212
9213
9214
async def load_latest_checkpoint(self, auto_restore_history: bool = True, max_age_hours: int = 24) -> dict[
    str, Any]:
    """Load the newest checkpoint and restore the agent's state from it.

    Args:
        auto_restore_history: Also restore conversation history from the
            checkpoint when True.
        max_age_hours: Ignore checkpoints older than this many hours.

    Returns:
        ``{"success": True, ...}`` with checkpoint details and restore
        statistics, or ``{"success": False, "error": ...}`` on failure.
    """
    try:
        from toolboxv2 import get_app
        folder = str(get_app().data_dir) + '/Agents/checkpoint/' + self.amd.name

        if not os.path.exists(folder):
            return {"success": False, "error": "Kein Checkpoint-Verzeichnis gefunden"}

        # Find the newest checkpoint within the age window
        checkpoint_files = []
        for file in os.listdir(folder):
            if file.endswith('.pkl') and file.startswith('agent_checkpoint_'):
                filepath = os.path.join(folder, file)
                try:
                    timestamp_str = file.replace('agent_checkpoint_', '').replace('.pkl', '')
                    if timestamp_str == 'final_checkpoint':
                        # Final checkpoint carries no timestamp in its name; use mtime
                        file_time = datetime.fromtimestamp(os.path.getmtime(filepath))
                    else:
                        file_time = datetime.strptime(timestamp_str, "%Y%m%d_%H%M%S")

                    age_hours = (datetime.now() - file_time).total_seconds() / 3600
                    if age_hours <= max_age_hours:
                        checkpoint_files.append((filepath, file_time, age_hours))
                except Exception:
                    # Unparseable filename — skip this file
                    continue

        if not checkpoint_files:
            return {"success": False, "error": f"Keine gültigen Checkpoints in {max_age_hours} Stunden gefunden"}

        # Load the newest checkpoint
        checkpoint_files.sort(key=lambda x: x[1], reverse=True)
        latest_checkpoint_path, latest_timestamp, age_hours = checkpoint_files[0]

        rprint(f"Lade Checkpoint: {latest_checkpoint_path} (Alter: {age_hours:.1f}h)")

        # NOTE(review): unpickling is only safe here because these are
        # trusted local files written by this agent itself.
        with open(latest_checkpoint_path, 'rb') as f:
            checkpoint: AgentCheckpoint = pickle.load(f)

        # Restore agent state from the checkpoint
        restore_stats = await self._restore_from_checkpoint_simplified(checkpoint, auto_restore_history)

        # Re-initialize context awareness on the restored state
        await self.initialize_context_awareness()

        return {
            "success": True,
            "checkpoint_file": latest_checkpoint_path,
            "checkpoint_age_hours": age_hours,
            "checkpoint_timestamp": latest_timestamp.isoformat(),
            "available_checkpoints": len(checkpoint_files),
            "restore_stats": restore_stats
        }

    except Exception as e:
        eprint(f"Checkpoint-Laden fehlgeschlagen: {e}")
        import traceback
        print(traceback.format_exc())
        return {"success": False, "error": str(e)}
pause() async

Pause agent execution

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
8981
8982
8983
8984
8985
8986
8987
8988
8989
8990
8991
8992
8993
8994
async def pause(self) -> bool:
    """Pause a running agent, persisting a checkpoint first.

    Returns:
        False when the agent is not running, True once paused.
    """
    if not self.is_running:
        return False

    self.is_paused = True
    self.shared["system_status"] = "paused"

    # Persist state so the run can be resumed later
    await self._save_checkpoint(await self._create_checkpoint())

    rprint("Agent execution paused")
    return True
resume() async

Resume agent execution

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
8996
8997
8998
8999
9000
9001
9002
9003
9004
9005
async def resume(self) -> bool:
    """Resume a paused agent.

    Returns:
        False when the agent is not paused, True once resumed.
    """
    if not self.is_paused:
        return False

    self.is_paused = False
    self.shared["system_status"] = "running"
    rprint("Agent execution resumed")
    return True
save_context_to_file(session_id=None) async

Save current context to file

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
9972
9973
9974
9975
9976
9977
9978
9979
9980
9981
9982
9983
9984
9985
9986
9987
async def save_context_to_file(self, session_id: str = None) -> bool:
    """Serialize the current (non-LLM-formatted) context to a JSON file.

    Returns:
        True on success, False when writing fails.
    """
    try:
        # Raw context dict, not the LLM-formatted string
        snapshot = await self.get_context(session_id=session_id, format_for_llm=False)
        filepath = self._get_context_path(session_id)

        with open(filepath, 'w', encoding='utf-8') as fh:
            # default=str stringifies non-JSON types (e.g. datetimes) on purpose
            json.dump(snapshot, fh, indent=2, ensure_ascii=False, default=str)

        rprint(f"Context saved to: {filepath}")
        return True

    except Exception as e:
        eprint(f"Failed to save context: {e}")
        return False
save_context_to_session(session_id=None, context_type='full') async

Save current context to ChatSession for persistent storage

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
9392
9393
9394
9395
9396
9397
9398
9399
9400
9401
9402
9403
9404
9405
9406
9407
9408
9409
9410
9411
9412
9413
9414
9415
9416
9417
9418
9419
9420
9421
9422
9423
9424
9425
9426
9427
9428
9429
9430
9431
9432
9433
async def save_context_to_session(self, session_id: str = None, context_type: str = "full") -> bool:
    """Persist a snapshot of the current context into the session's chat history.

    Returns:
        True on success, False when the context manager is missing or storage fails.
    """
    try:
        session_id = session_id or self.shared.get("session_id", "default")

        if not self.context_manager:
            eprint("Context manager not available")
            return False

        # Full context as seen by the unified context manager
        snapshot = await self.context_manager.build_unified_context(session_id, None, context_type)

        payload = f"[CONTEXT_SNAPSHOT_{context_type.upper()}] " + json.dumps(snapshot, default=str)
        meta = {
            "is_context_snapshot": True,
            "context_version": "2.0",
            "agent_name": self.amd.name,
            "session_stats": snapshot.get("session_stats", {}),
            "variables_count": len(snapshot.get("variables", {}).get("recent_results", [])),
            "execution_state": snapshot.get("execution_state", {}).get("system_status", "unknown")
        }

        # Stored as a system message so load_context_from_session can find it later
        await self.context_manager.add_interaction(
            session_id,
            "system",
            payload,
            metadata=meta
        )

        rprint(f"Context snapshot saved to session {session_id} (type: {context_type})")
        return True

    except Exception as e:
        eprint(f"Failed to save context to session: {e}")
        return False
set_persona(name, style='professional', tone='friendly', personality_traits=None, apply_method='system_prompt', integration_level='light', custom_instructions='')

Set agent persona mit erweiterten Konfigurationsmöglichkeiten

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
8758
8759
8760
8761
8762
8763
8764
8765
8766
8767
8768
8769
8770
8771
8772
8773
8774
8775
def set_persona(self, name: str, style: str = "professional", tone: str = "friendly",
                personality_traits: list[str] = None, apply_method: str = "system_prompt",
                integration_level: str = "light", custom_instructions: str = ""):
    """Configure the agent persona with extended customization options.

    Args:
        name: Display name of the persona.
        style: Communication style (e.g. "professional").
        tone: Conversational tone (e.g. "friendly").
        personality_traits: Trait keywords; defaults to ["helpful", "concise"].
        apply_method: How the persona is injected (e.g. "system_prompt").
        integration_level: How strongly the persona shapes output.
        custom_instructions: Free-form extra instructions for the persona.
    """
    # Avoid a mutable default argument by resolving the fallback here.
    traits = ["helpful", "concise"] if personality_traits is None else personality_traits

    self.amd.persona = PersonaConfig(
        name=name,
        style=style,
        tone=tone,
        personality_traits=traits,
        custom_instructions=custom_instructions,
        apply_method=apply_method,
        integration_level=integration_level,
    )

    rprint(f"Persona set: {name} ({style}, {tone}) - Method: {apply_method}, Level: {integration_level}")
set_response_format(response_format, text_length, custom_instructions='', quality_threshold=0.7)

Dynamic format and length configuration

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
8361
8362
8363
8364
8365
8366
8367
8368
8369
8370
8371
8372
8373
8374
8375
8376
8377
8378
8379
8380
8381
8382
8383
8384
8385
8386
8387
8388
8389
8390
8391
8392
8393
8394
8395
8396
8397
8398
8399
8400
8401
8402
8403
8404
def set_response_format(
    self,
    response_format: str,
    text_length: str,
    custom_instructions: str = "",
    quality_threshold: float = 0.7
):
    """Dynamically configure the response format and target length.

    Args:
        response_format: Value name of a ResponseFormat enum member.
        text_length: Value name of a TextLength enum member.
        custom_instructions: Extra instructions stored on the format config.
        quality_threshold: Minimum acceptable quality score for responses.

    Raises:
        ValueError: If response_format or text_length is not a valid enum value;
            the message lists all available options.
    """
    # Validate and convert in a single step (the original code constructed
    # each enum twice: once to validate, once to build the config).
    try:
        format_enum = ResponseFormat(response_format)
        length_enum = TextLength(text_length)
    except ValueError:
        available_formats = [fmt.value for fmt in ResponseFormat]
        available_lengths = [length.value for length in TextLength]
        # Suppress the terse original ValueError; our message is strictly richer.
        raise ValueError(
            f"Invalid format or length. "
            f"Available formats: {available_formats}. "
            f"Available lengths: {available_lengths}"
        ) from None

    # Ensure a persona exists to attach the format configuration to.
    if not self.amd.persona:
        self.amd.persona = PersonaConfig(name="Assistant")

    # Build the format configuration from the validated enums.
    format_config = FormatConfig(
        response_format=format_enum,
        text_length=length_enum,
        custom_instructions=custom_instructions,
        quality_threshold=quality_threshold
    )

    self.amd.persona.format_config = format_config

    # Refresh personality traits with format hints.
    self._update_persona_with_format(response_format, text_length)

    # Publish to shared state so downstream flow nodes pick up the new config.
    self.shared["persona_config"] = self.amd.persona
    self.shared["format_config"] = format_config

    rprint(f"Response format set: {response_format}, length: {text_length}")
set_variable(path, value)

Set variable using unified system

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
8561
8562
8563
def set_variable(self, path: str, value: Any):
    """Store *value* under *path* in the unified variable system.

    Thin delegate: all scoping and persistence logic lives in the
    variable manager itself.
    """
    self.variable_manager.set(path, value)
setup_a2a_server(host='0.0.0.0', port=5000, **kwargs)

Setup A2A server for bidirectional communication

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
10245
10246
10247
10248
10249
10250
10251
10252
10253
10254
10255
10256
10257
10258
10259
10260
10261
10262
10263
10264
10265
10266
10267
10268
10269
10270
10271
10272
10273
10274
10275
def setup_a2a_server(self, host: str = "0.0.0.0", port: int = 5000, **kwargs):
    """Configure an A2A server for bidirectional agent communication.

    Args:
        host: Interface to bind the server on.
        port: TCP port for the server.
        **kwargs: Forwarded verbatim to the A2AServer constructor.

    Failures are logged, never raised; without the A2A package this is a no-op.
    """
    if not A2A_AVAILABLE:
        wprint("A2A not available, cannot setup server")
        return

    try:
        card = AgentCard(
            name=self.amd.name,
            description="Production-ready PocketFlow agent",
            version="1.0.0"
        )
        self.a2a_server = A2AServer(host=host, port=port, agent_card=card, **kwargs)

        # Expose the agent's main entry point over the A2A protocol.
        @self.a2a_server.route("/run")
        async def handle_run(request_data):
            query = request_data.get("query", "")
            session_id = request_data.get("session_id", "a2a_session")
            result = await self.a_run(query, session_id=session_id)
            return {"response": result}

        rprint(f"A2A server setup on {host}:{port}")

    except Exception as e:
        eprint(f"Failed to setup A2A server: {e}")
setup_mcp_server(host='0.0.0.0', port=8000, name=None, **kwargs)

Setup MCP server

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
10277
10278
10279
10280
10281
10282
10283
10284
10285
10286
10287
10288
10289
10290
10291
10292
10293
10294
10295
10296
def setup_mcp_server(self, host: str = "0.0.0.0", port: int = 8000, name: str = None, **kwargs):
    """Configure an MCP server that exposes this agent as a single tool.

    Args:
        host: Accepted for interface symmetry with setup_a2a_server —
            NOTE(review): not passed to FastMCP here; confirm intended.
        port: Same as host — accepted but currently unused.
        name: Server name; defaults to "<agent_name>_MCP".
        **kwargs: Accepted for forward compatibility; currently unused.

    Failures are logged, never raised; without the MCP package this is a no-op.
    """
    if not MCP_AVAILABLE:
        wprint("MCP not available, cannot setup server")
        return

    try:
        server_name = name or f"{self.amd.name}_MCP"
        self.mcp_server = FastMCP(server_name)

        # Publish the agent's run loop as one MCP tool; the docstring below
        # is runtime-visible as the tool description.
        @self.mcp_server.tool()
        async def agent_run(query: str, session_id: str = "mcp_session") -> str:
            """Execute agent with given query"""
            return await self.a_run(query, session_id=session_id)

        rprint(f"MCP server setup: {server_name}")

    except Exception as e:
        eprint(f"Failed to setup MCP server: {e}")
start_servers() async

Start all configured servers

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
10300
10301
10302
10303
10304
10305
10306
10307
10308
10309
10310
10311
10312
async def start_servers(self):
    """Launch every configured server (A2A and/or MCP) concurrently.

    Servers that were never set up are skipped; exceptions from individual
    servers are collected rather than propagated (return_exceptions=True).
    """
    server_tasks = []
    if self.a2a_server:
        server_tasks.append(asyncio.create_task(self.a2a_server.start()))
    if self.mcp_server:
        server_tasks.append(asyncio.create_task(self.mcp_server.run()))

    # Nothing configured -> nothing to do.
    if not server_tasks:
        return

    rprint(f"Starting {len(server_tasks)} servers...")
    await asyncio.gather(*server_tasks, return_exceptions=True)
status(pretty_print=False)

Get comprehensive agent status with optional pretty printing

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
10496
10497
10498
10499
10500
10501
10502
10503
10504
10505
10506
10507
10508
10509
10510
10511
10512
10513
10514
10515
10516
10517
10518
10519
10520
10521
10522
10523
10524
10525
10526
10527
10528
10529
10530
10531
10532
10533
10534
10535
10536
10537
10538
10539
10540
10541
10542
10543
10544
10545
10546
10547
10548
10549
10550
10551
10552
10553
10554
10555
10556
10557
10558
10559
10560
10561
10562
10563
10564
10565
10566
10567
10568
10569
10570
10571
10572
10573
10574
10575
10576
10577
10578
10579
10580
10581
10582
10583
10584
10585
10586
10587
10588
10589
10590
10591
10592
10593
10594
10595
10596
10597
10598
10599
10600
10601
10602
10603
10604
10605
10606
10607
10608
10609
10610
10611
10612
10613
10614
10615
10616
10617
10618
10619
10620
10621
10622
10623
10624
10625
10626
10627
10628
10629
10630
10631
10632
10633
10634
10635
10636
10637
10638
10639
10640
10641
10642
10643
10644
10645
10646
10647
10648
10649
10650
10651
10652
10653
10654
10655
10656
10657
10658
10659
10660
10661
10662
10663
10664
10665
10666
10667
10668
10669
10670
10671
10672
10673
10674
10675
10676
10677
10678
10679
10680
10681
10682
10683
10684
10685
10686
10687
10688
10689
10690
10691
10692
10693
10694
10695
10696
10697
10698
10699
10700
10701
10702
10703
10704
10705
10706
10707
10708
10709
10710
10711
10712
10713
10714
10715
10716
10717
10718
10719
10720
10721
10722
10723
10724
10725
10726
10727
10728
10729
10730
10731
10732
10733
10734
10735
10736
10737
10738
10739
10740
10741
10742
10743
10744
10745
10746
10747
10748
10749
10750
10751
10752
10753
10754
10755
10756
10757
10758
10759
10760
10761
10762
10763
10764
10765
10766
10767
10768
10769
10770
10771
10772
10773
10774
10775
10776
10777
10778
10779
10780
10781
10782
10783
10784
10785
10786
10787
10788
def status(self, pretty_print: bool = False) -> dict[str, Any] | str:
    """Get comprehensive agent status with optional pretty printing.

    Aggregates runtime state, task execution stats, conversation info,
    capabilities, memory/context state, performance counters, server
    status and configuration from ``self.shared`` into one nested dict.

    Args:
        pretty_print: When True, render the status to the console via
            EnhancedVerboseOutput and return a confirmation string
            (or a JSON dump if rendering fails). When False, return
            the raw status dict.

    Returns:
        dict[str, Any] when pretty_print is False, otherwise str.
    """

    # Core status information
    base_status = {
        "agent_info": {
            "name": self.amd.name,
            "version": "2.0",
            "type": "FlowAgent"
        },
        "runtime_status": {
            "status": self.shared.get("system_status", "idle"),
            "is_running": self.is_running,
            "is_paused": self.is_paused,
            # Falls back to "now" if _start_time was never set -> ~0 uptime.
            "uptime_seconds": (datetime.now() - getattr(self, '_start_time', datetime.now())).total_seconds()
        },
        "task_execution": {
            "total_tasks": len(self.shared.get("tasks", {})),
            "active_tasks": len([t for t in self.shared.get("tasks", {}).values() if t.status == "running"]),
            "completed_tasks": len([t for t in self.shared.get("tasks", {}).values() if t.status == "completed"]),
            "failed_tasks": len([t for t in self.shared.get("tasks", {}).values() if t.status == "failed"]),
            "plan_adaptations": self.shared.get("plan_adaptations", 0)
        },
        "conversation": {
            "turns": len(self.shared.get("conversation_history", [])),
            "session_id": self.shared.get("session_id", self.active_session),
            "current_user": self.shared.get("user_id"),
            # Truncate long queries to 100 chars with an ellipsis for display.
            "last_query": self.shared.get("current_query", "")[:100] + "..." if len(
                self.shared.get("current_query", "")) > 100 else self.shared.get("current_query", "")
        },
        "capabilities": {
            "available_tools": len(self.shared.get("available_tools", [])),
            "tool_names": list(self.shared.get("available_tools", [])),
            "analyzed_tools": len(self._tool_capabilities),
            "world_model_size": len(self.shared.get("world_model", {})),
            # "high" simply means at least one tool has been analyzed.
            "intelligence_level": "high" if self._tool_capabilities else "basic"
        },
        "memory_context": {
            "session_initialized": self.shared.get("session_initialized", False),
            "session_managers": len(self.shared.get("session_managers", {})),
            "context_system": "advanced_session_aware" if self.shared.get("session_initialized") else "basic",
            "variable_scopes": len(self.variable_manager.get_scope_info()) if hasattr(self,
                                                                                      'variable_manager') else 0
        },
        "performance": {
            "total_cost": self.total_cost,
            "checkpoint_enabled": self.enable_pause_resume,
            "last_checkpoint": self.last_checkpoint.isoformat() if self.last_checkpoint else None,
            "max_parallel_tasks": self.max_parallel_tasks
        },
        "servers": {
            "a2a_server": self.a2a_server is not None,
            "mcp_server": self.mcp_server is not None,
            "server_count": sum([self.a2a_server is not None, self.mcp_server is not None])
        },
        "configuration": {
            "fast_llm_model": self.amd.fast_llm_model,
            "complex_llm_model": self.amd.complex_llm_model,
            "use_fast_response": getattr(self.amd, 'use_fast_response', False),
            "max_input_tokens": getattr(self.amd, 'max_input_tokens', 8000),
            "persona_configured": self.amd.persona is not None,
            "format_config": bool(getattr(self.amd.persona, 'format_config', None)) if self.amd.persona else False
        }
    }

    # Add detailed execution summary if tasks exist
    tasks = self.shared.get("tasks", {})
    if tasks:
        task_types_used = {}
        tools_used = []
        execution_timeline = []

        for task_id, task in tasks.items():
            # Count task types
            task_type = getattr(task, 'type', 'unknown')
            task_types_used[task_type] = task_types_used.get(task_type, 0) + 1

            # Collect tools used
            if hasattr(task, 'tool_name') and task.tool_name:
                tools_used.append(task.tool_name)

            # Timeline info (only for tasks that actually started)
            if hasattr(task, 'started_at') and task.started_at:
                timeline_entry = {
                    "task_id": task_id,
                    "type": task_type,
                    "started": task.started_at.isoformat(),
                    "status": getattr(task, 'status', 'unknown')
                }
                if hasattr(task, 'completed_at') and task.completed_at:
                    timeline_entry["completed"] = task.completed_at.isoformat()
                    timeline_entry["duration"] = (task.completed_at - task.started_at).total_seconds()
                execution_timeline.append(timeline_entry)

        base_status["task_execution"].update({
            "task_types_used": task_types_used,
            "tools_used": list(set(tools_used)),
            "execution_timeline": execution_timeline[-5:]  # Last 5 tasks
        })

    # Add context statistics
    if hasattr(self.task_flow, 'context_manager'):
        context_manager = self.task_flow.context_manager
        base_status["memory_context"].update({
            "compression_threshold": context_manager.compression_threshold,
            "max_tokens": context_manager.max_tokens,
            "active_context_sessions": len(getattr(context_manager, 'session_managers', {}))
        })

    # Add variable system info
    if hasattr(self, 'variable_manager'):
        available_vars = self.variable_manager.get_available_variables()
        scope_info = self.variable_manager.get_scope_info()

        base_status["variable_system"] = {
            "total_scopes": len(scope_info),
            "scope_names": list(scope_info.keys()),
            # NOTE(review): 'vars' shadows the builtin of the same name here.
            "total_variables": sum(len(vars) for vars in available_vars.values()),
            "scope_details": {
                scope: {"type": info["type"], "variables": len(available_vars.get(scope, {}))}
                for scope, info in scope_info.items()
            }
        }

    # Add format quality info if available
    quality_assessment = self.shared.get("quality_assessment", {})
    if quality_assessment:
        quality_details = quality_assessment.get("quality_details", {})
        base_status["format_quality"] = {
            "overall_score": quality_details.get("total_score", 0.0),
            "format_adherence": quality_details.get("format_adherence", 0.0),
            "length_adherence": quality_details.get("length_adherence", 0.0),
            "content_quality": quality_details.get("base_quality", 0.0),
            "assessment": quality_assessment.get("quality_assessment", "unknown"),
            "has_suggestions": bool(quality_assessment.get("suggestions", []))
        }

    # Add LLM usage statistics
    llm_stats = self.shared.get("llm_call_stats", {})
    if llm_stats:
        base_status["llm_usage"] = {
            "total_calls": llm_stats.get("total_calls", 0),
            "context_compression_rate": llm_stats.get("context_compression_rate", 0.0),
            # max(..., 1) guards against division by zero when no calls recorded.
            "average_context_tokens": llm_stats.get("context_tokens_used", 0) / max(llm_stats.get("total_calls", 1),
                                                                                    1),
            "total_tokens_used": llm_stats.get("total_tokens_used", 0)
        }

    # Add timestamp
    base_status["timestamp"] = datetime.now().isoformat()

    if not pretty_print:
        return base_status

    # Pretty print using EnhancedVerboseOutput
    try:
        from toolboxv2.mods.isaa.extras.verbose_output import EnhancedVerboseOutput
        verbose_output = EnhancedVerboseOutput(verbose=True)

        # Header
        verbose_output.log_header(f"Agent Status: {base_status['agent_info']['name']}")

        # Runtime Status: map status string to a print_* method suffix.
        status_color = {
            "running": "SUCCESS",
            "paused": "WARNING",
            "idle": "INFO",
            "error": "ERROR"
        }.get(base_status["runtime_status"]["status"], "INFO")

        getattr(verbose_output, f"print_{status_color.lower()}")(
            f"Status: {base_status['runtime_status']['status'].upper()}"
        )

        # Task Execution Summary
        task_exec = base_status["task_execution"]
        if task_exec["total_tasks"] > 0:
            verbose_output.formatter.print_section(
                "Task Execution",
                f"Total: {task_exec['total_tasks']} | "
                f"Completed: {task_exec['completed_tasks']} | "
                f"Failed: {task_exec['failed_tasks']} | "
                f"Active: {task_exec['active_tasks']}\n"
                f"Adaptations: {task_exec['plan_adaptations']}"
            )

            if task_exec.get("tools_used"):
                verbose_output.formatter.print_section(
                    "Tools Used",
                    ", ".join(task_exec["tools_used"])
                )

        # Capabilities
        caps = base_status["capabilities"]
        verbose_output.formatter.print_section(
            "Capabilities",
            f"Intelligence Level: {caps['intelligence_level']}\n"
            f"Available Tools: {caps['available_tools']}\n"
            f"Analyzed Tools: {caps['analyzed_tools']}\n"
            f"World Model Size: {caps['world_model_size']}"
        )

        # Memory & Context
        memory = base_status["memory_context"]
        verbose_output.formatter.print_section(
            "Memory & Context",
            f"Context System: {memory['context_system']}\n"
            f"Session Managers: {memory['session_managers']}\n"
            f"Variable Scopes: {memory['variable_scopes']}\n"
            f"Session Initialized: {memory['session_initialized']}"
        )

        # Configuration
        config = base_status["configuration"]
        verbose_output.formatter.print_section(
            "Configuration",
            f"Fast LLM: {config['fast_llm_model']}\n"
            f"Complex LLM: {config['complex_llm_model']}\n"
            f"Max Tokens: {config['max_input_tokens']}\n"
            f"Persona: {'Configured' if config['persona_configured'] else 'Default'}\n"
            f"Format Config: {'Active' if config['format_config'] else 'None'}"
        )

        # Performance
        perf = base_status["performance"]
        verbose_output.formatter.print_section(
            "Performance",
            f"Total Cost: ${perf['total_cost']:.4f}\n"
            f"Checkpointing: {'Enabled' if perf['checkpoint_enabled'] else 'Disabled'}\n"
            f"Max Parallel Tasks: {perf['max_parallel_tasks']}\n"
            f"Last Checkpoint: {perf['last_checkpoint'] or 'None'}"
        )

        # Variable System Details
        if "variable_system" in base_status:
            var_sys = base_status["variable_system"]
            scope_details = []
            for scope, details in var_sys["scope_details"].items():
                scope_details.append(f"{scope}: {details['variables']} variables ({details['type']})")

            verbose_output.formatter.print_section(
                "Variable System",
                f"Total Scopes: {var_sys['total_scopes']}\n"
                f"Total Variables: {var_sys['total_variables']}\n" +
                "\n".join(scope_details)
            )

        # Format Quality
        if "format_quality" in base_status:
            quality = base_status["format_quality"]
            verbose_output.formatter.print_section(
                "Format Quality",
                f"Overall Score: {quality['overall_score']:.2f}\n"
                f"Format Adherence: {quality['format_adherence']:.2f}\n"
                f"Length Adherence: {quality['length_adherence']:.2f}\n"
                f"Content Quality: {quality['content_quality']:.2f}\n"
                f"Assessment: {quality['assessment']}"
            )

        # LLM Usage
        if "llm_usage" in base_status:
            llm = base_status["llm_usage"]
            verbose_output.formatter.print_section(
                "LLM Usage Statistics",
                f"Total Calls: {llm['total_calls']}\n"
                f"Avg Context Tokens: {llm['average_context_tokens']:.1f}\n"
                f"Total Tokens: {llm['total_tokens_used']}\n"
                f"Compression Rate: {llm['context_compression_rate']:.2%}"
            )

        # Servers
        servers = base_status["servers"]
        if servers["server_count"] > 0:
            server_status = []
            if servers["a2a_server"]:
                server_status.append("A2A Server: Active")
            if servers["mcp_server"]:
                server_status.append("MCP Server: Active")

            verbose_output.formatter.print_section(
                "Servers",
                "\n".join(server_status)
            )

        verbose_output.print_separator()
        verbose_output.print_info(f"Status generated at: {base_status['timestamp']}")

        return "Status printed above"

    except Exception:
        # Fallback to JSON if pretty print fails
        import json
        return json.dumps(base_status, indent=2, default=str)
FormatConfig dataclass

Configuration for response format and length

Source code in toolboxv2/mods/isaa/base/Agent/types.py
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
@dataclass
class FormatConfig:
    """Configuration for response format and length."""
    # Format/length defaults match a normal chat conversation.
    response_format: ResponseFormat = ResponseFormat.FREE_TEXT
    text_length: TextLength = TextLength.CHAT_CONVERSATION
    custom_instructions: str = ""
    strict_format_adherence: bool = True
    quality_threshold: float = 0.7

    def get_format_instructions(self) -> str:
        """Generate format-specific instructions for the prompt."""
        instruction_by_format = {
            ResponseFormat.FREE_TEXT: "Use natural continuous text without special formatting.",
            ResponseFormat.WITH_TABLES: "Integrate tables for structured data representation. Use Markdown tables.",
            ResponseFormat.WITH_BULLET_POINTS: "Structure information with bullet points (•, -, *) for better readability.",
            ResponseFormat.WITH_LISTS: "Use numbered and unnumbered lists to organize content.",
            ResponseFormat.TEXT_ONLY: "Plain text only without formatting, symbols, or structural elements.",
            ResponseFormat.MD_TEXT: "Full Markdown formatting with headings, code blocks, links, etc.",
            ResponseFormat.YAML_TEXT: "Structure responses in YAML format for machine-readable output.",
            ResponseFormat.JSON_TEXT: "Format responses as a JSON structure for API integration.",
            ResponseFormat.PSEUDO_CODE: "Use pseudocode structure for algorithmic or logical explanations.",
            ResponseFormat.CODE_STRUCTURE: "Structure like code with indentation, comments, and logical blocks."
        }
        return instruction_by_format.get(self.response_format, "Standard-Formatierung.")

    def get_length_instructions(self) -> str:
        """Generate length-specific instructions for the prompt."""
        instruction_by_length = {
            TextLength.MINI_CHAT: "Very short, concise answers (1–2 sentences, max 50 words). Chat style.",
            TextLength.CHAT_CONVERSATION: "Moderate conversation length (2–4 sentences, 50–150 words). Natural conversational style.",
            TextLength.TABLE_CONVERSATION: "Structured, tabular presentation with compact explanations (100–250 words).",
            TextLength.DETAILED_INDEPTH: "Comprehensive, detailed explanations (300–800 words) with depth and context.",
            TextLength.PHD_LEVEL: "Academic depth with extensive explanations (800+ words), references, and technical terminology."
        }
        return instruction_by_length.get(self.text_length, "Standard-Länge.")

    def get_combined_instructions(self) -> str:
        """Combine format and length instructions into one prompt section."""
        parts = [
            "## Format-Anforderungen:",
            self.get_format_instructions(),
            "\n## Längen-Anforderungen:",
            self.get_length_instructions(),
        ]

        if self.custom_instructions:
            parts += ["\n## Zusätzliche Anweisungen:", self.custom_instructions]

        if self.strict_format_adherence:
            parts.append("\n## ATTENTION: STRICT FORMAT ADHERENCE REQUIRED!")

        return "\n".join(parts)

    def get_expected_word_range(self) -> tuple[int, int]:
        """Expected word-count range for quality assessment."""
        word_ranges = {
            TextLength.MINI_CHAT: (10, 50),
            TextLength.CHAT_CONVERSATION: (50, 150),
            TextLength.TABLE_CONVERSATION: (100, 250),
            TextLength.DETAILED_INDEPTH: (300, 800),
            TextLength.PHD_LEVEL: (800, 2000)
        }
        return word_ranges.get(self.text_length, (50, 200))
get_combined_instructions()

Kombiniere Format- und Längen-Anweisungen

Source code in toolboxv2/mods/isaa/base/Agent/types.py
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
def get_combined_instructions(self) -> str:
    """Combine format and length instructions into one prompt section."""
    parts = [
        "## Format-Anforderungen:",
        self.get_format_instructions(),
        "\n## Längen-Anforderungen:",
        self.get_length_instructions(),
    ]

    if self.custom_instructions:
        parts += ["\n## Zusätzliche Anweisungen:", self.custom_instructions]

    if self.strict_format_adherence:
        parts.append("\n## ATTENTION: STRICT FORMAT ADHERENCE REQUIRED!")

    return "\n".join(parts)
get_expected_word_range()

Erwartete Wortanzahl für Qualitätsbewertung

Source code in toolboxv2/mods/isaa/base/Agent/types.py
411
412
413
414
415
416
417
418
419
420
def get_expected_word_range(self) -> tuple[int, int]:
    """Expected (min, max) word count for quality assessment."""
    word_ranges = {
        TextLength.MINI_CHAT: (10, 50),
        TextLength.CHAT_CONVERSATION: (50, 150),
        TextLength.TABLE_CONVERSATION: (100, 250),
        TextLength.DETAILED_INDEPTH: (300, 800),
        TextLength.PHD_LEVEL: (800, 2000)
    }
    # Fallback range for any length without an explicit entry.
    return word_ranges.get(self.text_length, (50, 200))
get_format_instructions()

Generiere Format-spezifische Anweisungen

Source code in toolboxv2/mods/isaa/base/Agent/types.py
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
def get_format_instructions(self) -> str:
    """Generate format-specific prompt instructions."""
    instruction_by_format = {
        ResponseFormat.FREE_TEXT: "Use natural continuous text without special formatting.",
        ResponseFormat.WITH_TABLES: "Integrate tables for structured data representation. Use Markdown tables.",
        ResponseFormat.WITH_BULLET_POINTS: "Structure information with bullet points (•, -, *) for better readability.",
        ResponseFormat.WITH_LISTS: "Use numbered and unnumbered lists to organize content.",
        ResponseFormat.TEXT_ONLY: "Plain text only without formatting, symbols, or structural elements.",
        ResponseFormat.MD_TEXT: "Full Markdown formatting with headings, code blocks, links, etc.",
        ResponseFormat.YAML_TEXT: "Structure responses in YAML format for machine-readable output.",
        ResponseFormat.JSON_TEXT: "Format responses as a JSON structure for API integration.",
        ResponseFormat.PSEUDO_CODE: "Use pseudocode structure for algorithmic or logical explanations.",
        ResponseFormat.CODE_STRUCTURE: "Structure like code with indentation, comments, and logical blocks."
    }
    # Fallback text for unknown formats (German: "standard formatting").
    return instruction_by_format.get(self.response_format, "Standard-Formatierung.")
get_length_instructions()

Generiere Längen-spezifische Anweisungen

Source code in toolboxv2/mods/isaa/base/Agent/types.py
383
384
385
386
387
388
389
390
391
392
def get_length_instructions(self) -> str:
    """Generate length-specific prompt instructions."""
    instruction_by_length = {
        TextLength.MINI_CHAT: "Very short, concise answers (1–2 sentences, max 50 words). Chat style.",
        TextLength.CHAT_CONVERSATION: "Moderate conversation length (2–4 sentences, 50–150 words). Natural conversational style.",
        TextLength.TABLE_CONVERSATION: "Structured, tabular presentation with compact explanations (100–250 words).",
        TextLength.DETAILED_INDEPTH: "Comprehensive, detailed explanations (300–800 words) with depth and context.",
        TextLength.PHD_LEVEL: "Academic depth with extensive explanations (800+ words), references, and technical terminology."
    }
    # Fallback text for unknown lengths (German: "standard length").
    return instruction_by_length.get(self.text_length, "Standard-Länge.")
LLMReasonerNode

Bases: AsyncNode

Enhanced strategic reasoning core with outline-driven execution, context management, auto-recovery, and intensive variable system integration.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
3984
3985
3986
3987
3988
3989
3990
3991
3992
3993
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003
4004
4005
4006
4007
4008
4009
4010
4011
4012
4013
4014
4015
4016
4017
4018
4019
4020
4021
4022
4023
4024
4025
4026
4027
4028
4029
4030
4031
4032
4033
4034
4035
4036
4037
4038
4039
4040
4041
4042
4043
4044
4045
4046
4047
4048
4049
4050
4051
4052
4053
4054
4055
4056
4057
4058
4059
4060
4061
4062
4063
4064
4065
4066
4067
4068
4069
4070
4071
4072
4073
4074
4075
4076
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086
4087
4088
4089
4090
4091
4092
4093
4094
4095
4096
4097
4098
4099
4100
4101
4102
4103
4104
4105
4106
4107
4108
4109
4110
4111
4112
4113
4114
4115
4116
4117
4118
4119
4120
4121
4122
4123
4124
4125
4126
4127
4128
4129
4130
4131
4132
4133
4134
4135
4136
4137
4138
4139
4140
4141
4142
4143
4144
4145
4146
4147
4148
4149
4150
4151
4152
4153
4154
4155
4156
4157
4158
4159
4160
4161
4162
4163
4164
4165
4166
4167
4168
4169
4170
4171
4172
4173
4174
4175
4176
4177
4178
4179
4180
4181
4182
4183
4184
4185
4186
4187
4188
4189
4190
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221
4222
4223
4224
4225
4226
4227
4228
4229
4230
4231
4232
4233
4234
4235
4236
4237
4238
4239
4240
4241
4242
4243
4244
4245
4246
4247
4248
4249
4250
4251
4252
4253
4254
4255
4256
4257
4258
4259
4260
4261
4262
4263
4264
4265
4266
4267
4268
4269
4270
4271
4272
4273
4274
4275
4276
4277
4278
4279
4280
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290
4291
4292
4293
4294
4295
4296
4297
4298
4299
4300
4301
4302
4303
4304
4305
4306
4307
4308
4309
4310
4311
4312
4313
4314
4315
4316
4317
4318
4319
4320
4321
4322
4323
4324
4325
4326
4327
4328
4329
4330
4331
4332
4333
4334
4335
4336
4337
4338
4339
4340
4341
4342
4343
4344
4345
4346
4347
4348
4349
4350
4351
4352
4353
4354
4355
4356
4357
4358
4359
4360
4361
4362
4363
4364
4365
4366
4367
4368
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384
4385
4386
4387
4388
4389
4390
4391
4392
4393
4394
4395
4396
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415
4416
4417
4418
4419
4420
4421
4422
4423
4424
4425
4426
4427
4428
4429
4430
4431
4432
4433
4434
4435
4436
4437
4438
4439
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450
4451
4452
4453
4454
4455
4456
4457
4458
4459
4460
4461
4462
4463
4464
4465
4466
4467
4468
4469
4470
4471
4472
4473
4474
4475
4476
4477
4478
4479
4480
4481
4482
4483
4484
4485
4486
4487
4488
4489
4490
4491
4492
4493
4494
4495
4496
4497
4498
4499
4500
4501
4502
4503
4504
4505
4506
4507
4508
4509
4510
4511
4512
4513
4514
4515
4516
4517
4518
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528
4529
4530
4531
4532
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543
4544
4545
4546
4547
4548
4549
4550
4551
4552
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562
4563
4564
4565
4566
4567
4568
4569
4570
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581
4582
4583
4584
4585
4586
4587
4588
4589
4590
4591
4592
4593
4594
4595
4596
4597
4598
4599
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
4629
4630
4631
4632
4633
4634
4635
4636
4637
4638
4639
4640
4641
4642
4643
4644
4645
4646
4647
4648
4649
4650
4651
4652
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662
4663
4664
4665
4666
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680
4681
4682
4683
4684
4685
4686
4687
4688
4689
4690
4691
4692
4693
4694
4695
4696
4697
4698
4699
4700
4701
4702
4703
4704
4705
4706
4707
4708
4709
4710
4711
4712
4713
4714
4715
4716
4717
4718
4719
4720
4721
4722
4723
4724
4725
4726
4727
4728
4729
4730
4731
4732
4733
4734
4735
4736
4737
4738
4739
4740
4741
4742
4743
4744
4745
4746
4747
4748
4749
4750
4751
4752
4753
4754
4755
4756
4757
4758
4759
4760
4761
4762
4763
4764
4765
4766
4767
4768
4769
4770
4771
4772
4773
4774
4775
4776
4777
4778
4779
4780
4781
4782
4783
4784
4785
4786
4787
4788
4789
4790
4791
4792
4793
4794
4795
4796
4797
4798
4799
4800
4801
4802
4803
4804
4805
4806
4807
4808
4809
4810
4811
4812
4813
4814
4815
4816
4817
4818
4819
4820
4821
4822
4823
4824
4825
4826
4827
4828
4829
4830
4831
4832
4833
4834
4835
4836
4837
4838
4839
4840
4841
4842
4843
4844
4845
4846
4847
4848
4849
4850
4851
4852
4853
4854
4855
4856
4857
4858
4859
4860
4861
4862
4863
4864
4865
4866
4867
4868
4869
4870
4871
4872
4873
4874
4875
4876
4877
4878
4879
4880
4881
4882
4883
4884
4885
4886
4887
4888
4889
4890
4891
4892
4893
4894
4895
4896
4897
4898
4899
4900
4901
4902
4903
4904
4905
4906
4907
4908
4909
4910
4911
4912
4913
4914
4915
4916
4917
4918
4919
4920
4921
4922
4923
4924
4925
4926
4927
4928
4929
4930
4931
4932
4933
4934
4935
4936
4937
4938
4939
4940
4941
4942
4943
4944
4945
4946
4947
4948
4949
4950
4951
4952
4953
4954
4955
4956
4957
4958
4959
4960
4961
4962
4963
4964
4965
4966
4967
4968
4969
4970
4971
4972
4973
4974
4975
4976
4977
4978
4979
4980
4981
4982
4983
4984
4985
4986
4987
4988
4989
4990
4991
4992
4993
4994
4995
4996
4997
4998
4999
5000
5001
5002
5003
5004
5005
5006
5007
5008
5009
5010
5011
5012
5013
5014
5015
5016
5017
5018
5019
5020
5021
5022
5023
5024
5025
5026
5027
5028
5029
5030
5031
5032
5033
5034
5035
5036
5037
5038
5039
5040
5041
5042
5043
5044
5045
5046
5047
5048
5049
5050
5051
5052
5053
5054
5055
5056
5057
5058
5059
5060
5061
5062
5063
5064
5065
5066
5067
5068
5069
5070
5071
5072
5073
5074
5075
5076
5077
5078
5079
5080
5081
5082
5083
5084
5085
5086
5087
5088
5089
5090
5091
5092
5093
5094
5095
5096
5097
5098
5099
5100
5101
5102
5103
5104
5105
5106
5107
5108
5109
5110
5111
5112
5113
5114
5115
5116
5117
5118
5119
5120
5121
5122
5123
5124
5125
5126
5127
5128
5129
5130
5131
5132
5133
5134
5135
5136
5137
5138
5139
5140
5141
5142
5143
5144
5145
5146
5147
5148
5149
5150
5151
5152
5153
5154
5155
5156
5157
5158
5159
5160
5161
5162
5163
5164
5165
5166
5167
5168
5169
5170
5171
5172
5173
5174
5175
5176
5177
5178
5179
5180
5181
5182
5183
5184
5185
5186
5187
5188
5189
5190
5191
5192
5193
5194
5195
5196
5197
5198
5199
5200
5201
5202
5203
5204
5205
5206
5207
5208
5209
5210
5211
5212
5213
5214
5215
5216
5217
5218
5219
5220
5221
5222
5223
5224
5225
5226
5227
5228
5229
5230
5231
5232
5233
5234
5235
5236
5237
5238
5239
5240
5241
5242
5243
5244
5245
5246
5247
5248
5249
5250
5251
5252
5253
5254
5255
5256
5257
5258
5259
5260
5261
5262
5263
5264
5265
5266
5267
5268
5269
5270
5271
5272
5273
5274
5275
5276
5277
5278
5279
5280
5281
5282
5283
5284
5285
5286
5287
5288
5289
5290
5291
5292
5293
5294
5295
5296
5297
5298
5299
5300
5301
5302
5303
5304
5305
5306
5307
5308
5309
5310
5311
5312
5313
5314
5315
5316
5317
5318
5319
5320
5321
5322
5323
5324
5325
5326
5327
5328
5329
5330
5331
5332
5333
5334
5335
5336
5337
5338
5339
5340
5341
5342
5343
5344
5345
5346
5347
5348
5349
5350
5351
5352
5353
5354
5355
5356
5357
5358
5359
5360
5361
5362
5363
5364
5365
5366
5367
5368
5369
5370
5371
5372
5373
5374
5375
5376
5377
5378
5379
5380
5381
5382
5383
5384
5385
5386
5387
5388
5389
5390
5391
5392
5393
5394
5395
5396
5397
5398
5399
5400
5401
5402
5403
5404
5405
5406
5407
5408
5409
5410
5411
5412
5413
5414
5415
5416
5417
5418
5419
5420
5421
5422
5423
5424
5425
5426
5427
5428
5429
5430
5431
5432
5433
5434
5435
5436
5437
5438
5439
5440
5441
5442
5443
5444
5445
5446
5447
5448
5449
5450
5451
5452
5453
5454
5455
5456
5457
5458
5459
5460
5461
5462
5463
5464
5465
5466
5467
5468
5469
5470
5471
5472
5473
5474
5475
5476
5477
5478
5479
5480
5481
5482
5483
5484
5485
5486
5487
5488
5489
5490
5491
5492
5493
5494
5495
5496
5497
5498
5499
5500
5501
5502
5503
5504
5505
5506
5507
5508
5509
5510
5511
5512
5513
5514
5515
5516
5517
5518
5519
5520
5521
5522
5523
5524
5525
5526
5527
5528
5529
5530
5531
5532
5533
5534
5535
5536
5537
5538
5539
5540
5541
5542
5543
5544
5545
5546
5547
5548
5549
5550
5551
5552
5553
5554
5555
5556
5557
5558
5559
5560
5561
5562
5563
5564
5565
5566
5567
5568
5569
5570
5571
5572
5573
5574
5575
5576
5577
5578
5579
5580
5581
5582
5583
5584
5585
5586
5587
5588
5589
5590
5591
5592
5593
5594
5595
5596
5597
5598
5599
5600
5601
5602
5603
5604
5605
5606
5607
5608
5609
5610
5611
5612
5613
5614
5615
5616
5617
5618
5619
5620
5621
5622
5623
5624
5625
5626
5627
5628
5629
5630
5631
5632
5633
5634
5635
5636
5637
5638
5639
5640
5641
5642
5643
5644
5645
5646
5647
5648
5649
5650
5651
5652
5653
5654
5655
5656
5657
5658
5659
5660
5661
5662
5663
5664
5665
5666
5667
5668
5669
5670
5671
5672
5673
5674
5675
5676
5677
5678
5679
5680
5681
5682
5683
5684
5685
5686
5687
5688
5689
5690
5691
5692
5693
5694
5695
5696
5697
5698
5699
5700
5701
5702
5703
5704
5705
5706
5707
5708
5709
5710
5711
5712
5713
5714
5715
5716
5717
5718
5719
5720
5721
5722
5723
5724
5725
5726
5727
5728
5729
5730
5731
5732
5733
5734
5735
5736
5737
5738
5739
5740
5741
5742
5743
5744
5745
5746
5747
5748
5749
5750
5751
5752
5753
5754
5755
5756
5757
5758
5759
5760
5761
5762
5763
5764
5765
5766
5767
5768
5769
5770
5771
5772
5773
5774
5775
5776
5777
5778
5779
5780
5781
5782
5783
5784
5785
5786
5787
5788
5789
5790
5791
5792
5793
5794
5795
5796
5797
5798
5799
5800
5801
5802
5803
5804
5805
5806
5807
5808
5809
5810
5811
5812
5813
5814
5815
5816
5817
5818
5819
5820
5821
5822
5823
5824
5825
5826
5827
5828
5829
5830
5831
5832
5833
5834
5835
5836
5837
5838
5839
5840
5841
5842
5843
5844
5845
5846
5847
5848
5849
5850
5851
5852
5853
5854
5855
5856
5857
5858
5859
5860
5861
5862
5863
5864
5865
5866
5867
5868
5869
5870
5871
5872
5873
5874
5875
5876
5877
5878
5879
5880
5881
5882
5883
5884
5885
5886
5887
5888
5889
5890
5891
5892
5893
5894
5895
5896
5897
5898
5899
5900
5901
5902
5903
5904
5905
5906
5907
5908
5909
5910
5911
5912
5913
5914
5915
5916
5917
5918
5919
5920
5921
5922
5923
5924
5925
5926
5927
5928
5929
5930
5931
5932
5933
5934
5935
5936
5937
5938
5939
5940
5941
5942
5943
5944
5945
5946
5947
5948
5949
5950
5951
5952
5953
5954
5955
5956
5957
5958
5959
5960
5961
5962
5963
5964
5965
5966
5967
5968
5969
5970
5971
5972
5973
5974
5975
5976
5977
5978
5979
5980
5981
5982
5983
5984
5985
5986
5987
5988
5989
5990
5991
5992
5993
5994
5995
5996
5997
5998
5999
6000
6001
6002
6003
6004
6005
6006
6007
6008
6009
6010
6011
6012
6013
6014
6015
6016
6017
6018
6019
6020
6021
6022
6023
6024
6025
6026
6027
6028
6029
6030
6031
6032
6033
6034
6035
6036
6037
6038
6039
6040
6041
6042
6043
6044
6045
6046
6047
6048
6049
6050
6051
6052
6053
6054
6055
6056
6057
6058
6059
6060
6061
6062
6063
6064
6065
6066
6067
6068
6069
6070
6071
6072
6073
6074
6075
6076
6077
6078
6079
6080
6081
6082
6083
6084
6085
6086
6087
6088
6089
6090
6091
6092
6093
6094
6095
6096
6097
6098
6099
6100
6101
6102
6103
6104
6105
6106
6107
6108
6109
6110
6111
6112
6113
6114
6115
6116
6117
6118
6119
6120
6121
6122
6123
6124
6125
6126
6127
6128
6129
6130
6131
6132
6133
6134
6135
6136
6137
6138
6139
6140
6141
6142
6143
6144
6145
6146
6147
6148
6149
6150
6151
6152
6153
6154
6155
6156
6157
6158
6159
6160
6161
6162
6163
6164
6165
6166
6167
6168
6169
6170
6171
6172
6173
6174
6175
6176
6177
6178
6179
6180
6181
6182
6183
6184
6185
6186
6187
6188
6189
6190
6191
6192
6193
6194
6195
6196
6197
6198
6199
6200
6201
6202
6203
6204
6205
6206
6207
6208
6209
6210
6211
6212
6213
6214
6215
6216
6217
6218
6219
6220
6221
6222
6223
6224
6225
6226
6227
6228
6229
6230
6231
6232
6233
6234
6235
6236
6237
6238
6239
6240
6241
6242
6243
6244
6245
6246
6247
6248
6249
6250
6251
6252
6253
6254
6255
6256
6257
6258
6259
6260
6261
6262
6263
6264
6265
6266
6267
6268
6269
6270
6271
6272
6273
6274
6275
6276
6277
6278
6279
6280
6281
6282
6283
6284
6285
6286
6287
6288
6289
6290
6291
6292
6293
6294
6295
6296
6297
6298
6299
6300
6301
6302
6303
6304
6305
6306
6307
6308
6309
6310
6311
6312
6313
6314
6315
6316
6317
6318
6319
6320
6321
6322
6323
6324
6325
6326
6327
6328
6329
6330
6331
6332
6333
6334
6335
6336
6337
6338
6339
6340
6341
6342
6343
6344
6345
6346
6347
6348
6349
6350
6351
6352
6353
6354
6355
6356
6357
6358
6359
6360
6361
6362
6363
6364
6365
6366
6367
6368
6369
6370
6371
6372
6373
6374
6375
6376
6377
6378
6379
6380
6381
6382
6383
6384
6385
6386
6387
6388
6389
6390
6391
6392
6393
6394
6395
6396
6397
6398
6399
6400
6401
6402
6403
6404
6405
6406
6407
6408
6409
6410
6411
6412
6413
6414
6415
6416
6417
6418
6419
6420
6421
6422
6423
6424
6425
6426
6427
6428
6429
6430
6431
6432
6433
6434
6435
6436
6437
6438
6439
6440
6441
6442
6443
6444
6445
6446
6447
6448
6449
6450
6451
6452
6453
6454
6455
6456
6457
6458
6459
6460
6461
6462
6463
6464
6465
6466
6467
6468
6469
6470
6471
6472
6473
6474
6475
6476
6477
6478
6479
6480
6481
6482
6483
6484
6485
6486
6487
6488
6489
6490
6491
6492
6493
6494
6495
6496
6497
6498
6499
6500
6501
6502
6503
6504
6505
6506
6507
6508
6509
6510
6511
6512
6513
6514
6515
6516
6517
6518
6519
6520
6521
6522
6523
6524
6525
6526
6527
6528
6529
6530
6531
6532
6533
6534
6535
6536
6537
6538
6539
6540
6541
6542
6543
6544
6545
6546
6547
6548
6549
6550
6551
6552
6553
6554
6555
6556
6557
6558
6559
6560
6561
6562
6563
6564
6565
6566
6567
6568
6569
6570
6571
6572
6573
6574
6575
6576
6577
6578
6579
6580
6581
6582
6583
6584
6585
6586
6587
6588
6589
6590
6591
6592
6593
6594
6595
6596
6597
6598
6599
6600
6601
6602
6603
6604
6605
6606
6607
6608
6609
6610
6611
6612
6613
6614
6615
6616
6617
6618
6619
6620
6621
6622
6623
6624
6625
6626
6627
6628
6629
6630
6631
6632
6633
6634
6635
6636
6637
6638
6639
6640
6641
6642
6643
6644
6645
6646
6647
6648
6649
6650
6651
6652
6653
6654
6655
6656
6657
6658
6659
6660
6661
6662
6663
6664
6665
6666
6667
6668
6669
6670
6671
6672
6673
6674
6675
6676
6677
6678
6679
6680
6681
6682
6683
6684
6685
6686
6687
6688
6689
6690
6691
6692
6693
6694
6695
6696
6697
6698
6699
6700
6701
6702
6703
6704
6705
6706
6707
6708
6709
6710
6711
6712
6713
6714
6715
6716
6717
6718
6719
6720
6721
6722
6723
6724
6725
6726
6727
6728
6729
6730
6731
6732
6733
6734
6735
6736
6737
6738
6739
6740
6741
6742
6743
6744
6745
6746
6747
6748
6749
6750
6751
6752
6753
6754
6755
6756
6757
6758
6759
6760
6761
6762
6763
6764
6765
6766
6767
6768
6769
6770
6771
6772
6773
6774
6775
6776
6777
6778
6779
6780
6781
6782
6783
6784
6785
6786
6787
6788
6789
6790
6791
6792
6793
6794
6795
6796
6797
6798
6799
6800
6801
6802
6803
6804
6805
6806
6807
6808
6809
6810
6811
6812
6813
6814
6815
6816
6817
6818
6819
6820
6821
6822
6823
6824
6825
6826
6827
6828
6829
6830
6831
6832
6833
6834
6835
6836
6837
6838
6839
6840
6841
6842
6843
6844
6845
6846
6847
6848
6849
6850
6851
6852
6853
6854
6855
6856
6857
6858
6859
6860
6861
6862
6863
6864
6865
6866
6867
6868
6869
6870
6871
6872
6873
6874
6875
6876
6877
6878
6879
6880
6881
6882
6883
6884
6885
6886
6887
6888
6889
6890
6891
6892
6893
6894
6895
6896
6897
6898
6899
6900
6901
6902
6903
6904
6905
6906
6907
6908
6909
6910
6911
6912
6913
6914
6915
6916
6917
6918
6919
6920
6921
6922
6923
6924
6925
6926
6927
6928
6929
6930
6931
6932
6933
6934
6935
6936
6937
6938
6939
6940
6941
6942
6943
6944
6945
6946
6947
6948
6949
6950
6951
6952
6953
6954
6955
6956
6957
6958
6959
6960
6961
6962
6963
6964
6965
6966
6967
6968
6969
6970
6971
6972
6973
6974
6975
6976
6977
6978
6979
6980
6981
6982
6983
6984
6985
6986
6987
6988
6989
6990
6991
6992
6993
6994
6995
6996
6997
6998
6999
7000
7001
7002
7003
7004
7005
7006
7007
7008
7009
7010
7011
7012
7013
7014
7015
7016
7017
7018
7019
7020
7021
7022
7023
7024
7025
7026
7027
7028
7029
7030
7031
7032
7033
7034
7035
7036
7037
7038
7039
7040
7041
7042
7043
7044
7045
7046
7047
7048
7049
7050
7051
7052
7053
7054
7055
7056
7057
7058
7059
7060
7061
7062
7063
7064
7065
7066
7067
7068
7069
7070
7071
7072
7073
7074
7075
7076
7077
7078
7079
7080
7081
7082
7083
@with_progress_tracking
class LLMReasonerNode(AsyncNode):
    """
    Enhanced strategic reasoning core with outline-driven execution,
    context management, auto-recovery, and intensive variable system integration.
    """

    def __init__(self, max_reasoning_loops: int = 12, **kwargs):
        """Set up the reasoner with loop limits, tracking state and anti-loop guards."""
        super().__init__(**kwargs)

        # Core loop configuration / counters
        self.max_reasoning_loops = max_reasoning_loops
        self.current_loop_count = 0
        self.current_reasoning_count = 0

        # Working state for a single reasoning run
        self.reasoning_context = []
        self.internal_task_stack = []
        self.meta_tools_registry = {}
        self.agent_instance: FlowAgent = None

        # Outline-driven execution tracking
        self.outline = None
        self.current_outline_step = 0
        self.step_completion_tracking = {}

        # Context-size management
        self.loop_detection_memory = []
        self.context_summary_threshold = 15
        self.max_context_size = 30
        self.performance_metrics = {
            "loop_times": [],
            "progress_loops": 0,
            "total_loops": 0,
        }

        # Auto-recovery bookkeeping
        self.auto_recovery_attempts = 0
        self.max_auto_recovery = 8
        self.variable_manager = None

        # Anti-loop mechanisms
        self.last_action_signatures = []
        self.step_enforcement_active = True
        self.mandatory_progress_check = True

    async def prep_async(self, shared):
        """Reset per-run state and assemble the prep payload for exec_async.

        Args:
            shared: Flow-wide shared-state dict (query, session id, model names,
                sub-system instances, context/variable managers).

        Returns:
            dict containing the query, context strings, model names and
            sub-system references the reasoning loop consumes.
        """
        # Reset all per-execution state so a reused node instance starts clean
        self.reasoning_context = []
        self.internal_task_stack = []
        self.current_loop_count = 0
        self.current_reasoning_count = 0
        self.outline = None
        self.current_outline_step = 0
        self.step_completion_tracking = {}
        self.loop_detection_memory = []
        self.performance_metrics = {
            "loop_times": [],
            "progress_loops": 0,
            "total_loops": 0
        }
        self.auto_recovery_attempts = 0
        self.last_action_signatures = []

        self.agent_instance = shared.get("agent_instance")

        # Variable manager: prefer the shared one, fall back to the agent's.
        # Resolved lazily — the original passed self.agent_instance.variable_manager
        # as the .get() default, which raised AttributeError whenever
        # agent_instance was missing, even if shared already had a manager.
        self.variable_manager = shared.get("variable_manager")
        if self.variable_manager is None and self.agent_instance is not None:
            self.variable_manager = self.agent_instance.variable_manager
        context_manager = shared.get("context_manager")

        if self.variable_manager:
            # Store reasoning session context for later analysis/recovery
            session_context = {
                "session_id": shared.get("session_id", "default"),
                "start_time": datetime.now().isoformat(),
                "query": shared.get("current_query", ""),
                "reasoning_mode": "outline_driven"
            }
            self.variable_manager.set("reasoning.current_session", session_context)
            # Load previous successful patterns from variables
            self._load_historical_patterns()

        # Build comprehensive system context via UnifiedContextManager
        system_context = await self._build_enhanced_system_context_unified(shared, context_manager)

        return {
            "original_query": shared.get("current_query", ""),
            "session_id": shared.get("session_id", "default"),
            "agent_instance": shared.get("agent_instance"),
            "variable_manager": self.variable_manager,
            "context_manager": context_manager,  # Context Manager Reference
            "system_context": system_context,
            "available_tools": shared.get("available_tools", []),
            "tool_capabilities": shared.get("tool_capabilities", {}),
            "fast_llm_model": shared.get("fast_llm_model"),
            "complex_llm_model": shared.get("complex_llm_model"),
            "progress_tracker": shared.get("progress_tracker"),
            "formatted_context": shared.get("formatted_context", {}),
            "historical_context": await self._get_historical_context_unified(context_manager, shared.get("session_id")),
            "capabilities_summary": shared.get("capabilities_summary", ""),
            # Sub-system references
            "llm_tool_node": shared.get("llm_tool_node_instance"),
            "task_planner": shared.get("task_planner_instance"),
            "task_executor": shared.get("task_executor_instance"),
        }

    async def exec_async(self, prep_res):
        """Run the outline-driven reasoning loop until a final result is produced.

        Flow: create a mandatory outline, then iterate — build a prompt for the
        current outline step, call the LLM, execute any meta-tool calls, track
        progress, and trigger auto-recovery on stalls or detected loops.

        Args:
            prep_res: dict produced by prep_async.

        Returns:
            dict with the final result plus loop/outline/performance telemetry.
        """
        if not LITELLM_AVAILABLE:
            return await self._fallback_direct_response(prep_res)

        original_query = prep_res["original_query"]
        agent_instance = prep_res["agent_instance"]
        progress_tracker = prep_res.get("progress_tracker")

        # Initialize enhanced reasoning context
        await self._initialize_reasoning_session(prep_res, original_query)

        # STEP 1: MANDATORY OUTLINE CREATION
        if not self.outline:
            outline_result = await self._create_initial_outline(prep_res)
            if not outline_result:
                return await self._create_error_response(original_query, "Failed to create initial outline")

        final_result = None
        consecutive_no_progress = 0
        max_no_progress = 3

        # Enhanced main reasoning loop with strict progress tracking
        while self.current_reasoning_count < self.max_reasoning_loops:
            self.current_loop_count += 1
            loop_start_time = time.time()

            # Check for infinite loops
            if self._detect_infinite_loop():
                await self._trigger_auto_recovery(prep_res)
                if self.auto_recovery_attempts >= self.max_auto_recovery:
                    break

            # Auto-context management (summarize/trim an oversized context)
            await self._manage_context_size()

            # Progress tracking
            if progress_tracker:
                await progress_tracker.emit_event(ProgressEvent(
                    event_type="reasoning_loop",
                    timestamp=time.time(),
                    node_name="LLMReasonerNode",
                    status=NodeStatus.RUNNING,
                    metadata={
                        "loop_number": self.current_loop_count,
                        "outline_step": self.current_outline_step,
                        "outline_total": len(self.outline.get("steps", [])) if self.outline else 0,
                        "context_size": len(self.reasoning_context),
                        "task_stack_size": len(self.internal_task_stack),
                        "auto_recovery_attempts": self.auto_recovery_attempts,
                        "performance_metrics": self.performance_metrics
                    }
                ))

            try:
                # Build enhanced reasoning prompt with outline context
                reasoning_prompt = await self._build_outline_driven_prompt(prep_res)

                # Force progress if the model keeps analyzing without acting
                if self.mandatory_progress_check and consecutive_no_progress >= 2:
                    reasoning_prompt += "\n\n**MANDATORY**: You must either complete current outline step or move to next step. No more analysis without action!"

                # LLM reasoning call
                model_to_use = prep_res.get("complex_llm_model", "openrouter/openai/gpt-4o")

                llm_response = await agent_instance.a_run_llm_completion(
                    model=model_to_use,
                    messages=[{"role": "user", "content": reasoning_prompt}],
                    temperature=0.2,  # Lower temperature for more focused execution
                    node_name="LLMReasonerNode",
                    stop="<immediate_context>",
                    task_id=f"reasoning_loop_{self.current_loop_count}_step_{self.current_outline_step}"
                )

                # Add LLM response to context
                self.reasoning_context.append({
                    "type": "reasoning",
                    "content": llm_response,
                    "loop": self.current_loop_count,
                    "outline_step": self.current_outline_step,
                    "timestamp": datetime.now().isoformat()
                })

                # Parse and execute meta-tool calls with enhanced tracking
                progress_made = await self._parse_and_execute_meta_tools(llm_response, prep_res)

                action_taken = progress_made.get("action_taken", False)
                actual_progress = progress_made.get("progress_made", False)

                # Update performance metrics exactly ONCE per loop. (The original
                # code updated them a second time further down with a different
                # flag, double-counting loop_times/total_loops.)
                self._update_performance_metrics(loop_start_time, actual_progress)

                if not action_taken:
                    self.current_reasoning_count += 1
                    if self.current_outline_step > len(self.outline.get("steps", [])):
                        progress_made["final_result"] = llm_response
                        rprint("Final result reached forced by outline step count")
                    if self.current_outline_step < len(self.outline.get("steps", [])) and self.outline.get("steps", [])[self.current_outline_step].get("is_final", False):
                        progress_made["final_result"] = llm_response
                        rprint("Final result reached forced by outline step count final step")
                else:
                    self.current_reasoning_count -= 1

                # Check for final result
                if progress_made.get("final_result"):
                    final_result = progress_made["final_result"]
                    await self._finalize_reasoning_session(prep_res, final_result)
                    break

                # Progress monitoring (metrics were already updated above)
                if action_taken:
                    consecutive_no_progress = 0
                else:
                    consecutive_no_progress += 1

                # Check outline completion (hard ceiling on step index)
                if self.outline and self.current_outline_step >= len(self.outline.get("steps", [])) + self.max_reasoning_loops:
                    # All outline steps completed, force final response
                    final_result = await self._create_outline_completion_response(prep_res)
                    break

                # Emergency break for excessive no-progress
                if consecutive_no_progress >= max_no_progress:
                    await self._trigger_auto_recovery(prep_res)

            except Exception as e:
                await self._handle_reasoning_error(e, prep_res, progress_tracker)
                import traceback
                print(traceback.format_exc())
                if self.auto_recovery_attempts >= self.max_auto_recovery:
                    final_result = await self._create_error_response(original_query, str(e))
                    break

        # If no final result after max loops, create a comprehensive summary
        if not final_result:
            final_result = await self._create_enhanced_timeout_response(original_query, prep_res)

        return {
            "final_result": final_result,
            "reasoning_loops": self.current_loop_count,
            "reasoning_context": self.reasoning_context.copy(),
            "internal_task_stack": self.internal_task_stack.copy(),
            "outline": self.outline,
            "outline_completion": self.current_outline_step,
            "performance_metrics": self.performance_metrics,
            "auto_recovery_attempts": self.auto_recovery_attempts
        }

    async def _build_enhanced_system_context_unified(self, shared, context_manager) -> str:
        """Assemble a multi-line system-context summary via the UnifiedContextManager."""
        lines = []

        # External tool inventory
        tools = shared.get("available_tools", [])
        if tools:
            lines.append(f"Available external tools: {', '.join(tools)}")

        # Unified context manager status
        if context_manager:
            stats = context_manager.get_session_statistics()
            lines.append(f"Context System: Advanced with {stats['total_sessions']} active sessions")
            lines.append(f"Cache Status: {stats['cache_entries']} cached contexts")

        # Variable system overview
        if self.variable_manager:
            scope_info = self.variable_manager.get_scope_info()
            lines.append(f"Variable System: {len(scope_info)} scopes available")

            # Previously stored task results, if any
            stored_results = len(self.variable_manager.get("results", {}))
            if stored_results:
                lines.append(f"Previous results: {stored_results} task results available")

        # Per-session conversation history availability
        sid = shared.get("session_id", "default")
        if context_manager and sid in context_manager.session_managers:
            session_obj = context_manager.session_managers[sid]
            if hasattr(session_obj, 'history'):
                lines.append(f"Session History: {len(session_obj.history)} conversation entries available")
            elif isinstance(session_obj, dict) and 'history' in session_obj:
                lines.append(f"Session History: {len(session_obj['history'])} conversation entries (fallback mode)")

        # Snapshot of the task execution state
        task_map = shared.get("tasks", {})
        if task_map:
            running = len([t for t in task_map.values() if t.status == "running"])
            done = len([t for t in task_map.values() if t.status == "completed"])
            lines.append(f"Execution state: {running} active, {done} completed tasks")

        # Historical success patterns loaded by _load_historical_patterns
        if hasattr(self, 'historical_successful_patterns'):
            lines.append(
                f"Historical patterns: {len(self.historical_successful_patterns)} successful patterns loaded")

        return "\n".join(lines) if lines else "Basic system context available"

    async def _get_historical_context_unified(self, context_manager, session_id: str) -> str:
        """Summarize historical conversation/execution signals from the UnifiedContextManager."""
        if not context_manager:
            return ""

        try:
            # Pull the unified context once and mine it for history signals
            unified = await context_manager.build_unified_context(session_id, None, "historical")

            parts = []

            # Chat history insights
            history = unified.get("chat_history", [])
            if history:
                parts.append(f"Conversation History: {len(history)} messages available")

                # Rough query-length statistics over user turns
                queries = [m['content'] for m in history if m.get('role') == 'user']
                if queries:
                    mean_len = sum(len(q) for q in queries) / len(queries)
                    parts.append(f"Query patterns: Avg length {mean_len:.0f} chars")

            # Execution history recorded in the variable system
            if self.variable_manager:
                # Recently successful queries
                wins = self.variable_manager.get("reasoning.recent_successes", [])
                if wins:
                    parts.append(f"Recent successful queries: {len(wins)}")

                # Average loop count across past sessions
                avg_loops = self.variable_manager.get("reasoning.performance.avg_loops", 0)
                if avg_loops:
                    parts.append(f"Average reasoning loops: {avg_loops}")

            # Recently finished tasks from the unified execution state
            exec_state = unified.get("execution_state", {})
            if exec_state.get("recent_completions"):
                finished = exec_state["recent_completions"]
                parts.append(f"Recent completions: {len(finished)} tasks finished")

            return "\n".join(parts)

        except Exception as e:
            eprint(f"Failed to get historical context: {e}")
            return "Historical context unavailable"

    def _load_historical_patterns(self):
        """Cache the most recent success/failure patterns from the variable store."""
        if not self.variable_manager:
            return

        wins = self.variable_manager.get("reasoning.successful_patterns.outlines", [])
        losses = self.variable_manager.get("reasoning.failed_patterns", [])

        # Keep only the freshest entries of each kind
        self.historical_successful_patterns = wins[-5:]   # last 5 successful
        self.historical_failed_patterns = losses[-10:]    # last 10 failed

    def _get_historical_context(self) -> str:
        """Build a short historical summary directly from the variable system."""
        if not self.variable_manager:
            return ""

        summary = []
        lookup = self.variable_manager.get

        # Recently successful queries
        successes = lookup("reasoning.recent_successes", [])
        if successes:
            summary.append(f"Recent successful queries: {len(successes)}")

        # Average loop count from past sessions
        avg_loops = lookup("reasoning.performance.avg_loops", 0)
        if avg_loops:
            summary.append(f"Average reasoning loops: {avg_loops}")

        # Known failure patterns worth avoiding
        failures = lookup("reasoning.failure_patterns", [])
        if failures:
            summary.append(f"Known failure patterns: {len(failures)}")

        return "\n".join(summary)

    async def _initialize_reasoning_session(self, prep_res, original_query):
        """Seed the reasoning context and persist session metadata to the variable store."""
        ctx = self.reasoning_context

        # Session start marker
        ctx.append({
            "type": "session_start",
            "content": f"Enhanced reasoning session started for: {original_query}",
            "timestamp": datetime.now().isoformat(),
            "session_id": prep_res.get("session_id")
        })

        # Persist session metadata for later analysis/recovery
        if self.variable_manager:
            self.variable_manager.set("reasoning.current_session.data", {
                "query": original_query,
                "start_time": datetime.now().isoformat(),
                "max_loops": self.max_reasoning_loops,
                "context_management": "auto_summary",
                "outline_driven": True
            })

        # System context entry
        ctx.append({
            "type": "system_context",
            "content": prep_res["system_context"],
            "timestamp": datetime.now().isoformat()
        })

        # Historical context entry (only when available)
        historical = prep_res.get("historical_context")
        if historical:
            ctx.append({
                "type": "historical_context",
                "content": historical,
                "timestamp": datetime.now().isoformat()
            })

    async def _create_initial_outline(self, prep_res) -> bool:
        """Create the mandatory initial execution outline for the query.

        Prompts the complex LLM for a structured plan bounded by
        OUTLINE_START/OUTLINE_END markers (with a single-step fast path for
        simple queries), parses it via _parse_outline_from_response, stores
        it on self and in the variable manager, and emits a progress event.

        Args:
            prep_res: Prepared shared state; must contain "original_query"
                and "agent_instance", may carry "available_tools",
                "historical_context" and "complex_llm_model".

        Returns:
            True when an outline was parsed and stored, False when parsing
            failed or the LLM call raised.
        """
        original_query = prep_res["original_query"]
        agent_instance = prep_res["agent_instance"]

        # NOTE: this template is consumed verbatim by the LLM; the
        # OUTLINE_START/OUTLINE_END markers are what the parser keys on,
        # so the wording (including quirks) is intentionally left untouched.
        outline_prompt = f"""You MUST create an initial execution outline for this query. This is mandatory.

**Query:** {original_query}

**Available Resources:**
- Tools: {', '.join(prep_res.get('available_tools', []))}
- Sub-systems: LLM Tool Node, Task Planner, Task Executor

LLM Tool Node is for all tool calls!
LLM Tool Node is best for simple multi-step tasks like fetching data from a tool and summarizing it.
Task Planner is best for complex tasks with multiple dependencies and complex task flows.

**Historical Context:** {prep_res.get('historical_context', 'None')}

**Fast Path for Simple Queries:**
If the query is simple and can be answered directly without needing tools or complex reasoning, you MUST create a single-step outline using the `direct_response` method.

Create a structured outline using this EXACT format:

OUTLINE_START
Step 1: [Brief description of first step]
- Method: [internal_reasoning | delegate_to_llm_tool_node | create_and_execute_plan | direct_response]
- Expected outcome: [What this step should achieve]
- Success criteria: [How to know this step is complete]

[For complex queries, continue with more steps as needed.]

Final Step: Synthesize results and provide comprehensive response
- Method: direct_response
- Expected outcome: Complete answer to user query
- Success criteria: User query fully addressed
OUTLINE_END

**Requirements:**
1. Outline must have between 1 and 7 steps.
2. For simple queries, a single "Final Step" using the 'direct_response' method is the correct approach.
3. Each step must have clear success criteria and build logically toward the answer.
4. Be specific about which meta-tools to use for each step. meta-tools ar not Tools ! avalabel meta-tools *Method* (internal_reasoning, delegate_to_llm_tool_node, create_and_execute_plan, direct_response) no exceptions

Create the outline now:"""

        try:
            llm_response = await agent_instance.a_run_llm_completion(
                model=prep_res.get("complex_llm_model", "openrouter/openai/gpt-4o"),
                messages=[{"role": "user", "content": outline_prompt}],
                temperature=0.2,  # Lower temperature for more deterministic outlining
                max_tokens=2048,
                node_name="LLMReasonerNode",
                task_id="create_initial_outline"
            )

            # Parse outline from response; None signals a malformed reply.
            outline = self._parse_outline_from_response(llm_response)

            # Emit the progress event even when parsing failed (outline may
            # be None in metadata) so observers see the attempt.
            # NOTE(review): this checks self.agent_instance while the call
            # above used prep_res["agent_instance"] — confirm both refer to
            # the same object.
            if self.agent_instance and self.agent_instance.progress_tracker:
                await self.agent_instance.progress_tracker.emit_event(ProgressEvent(
                    event_type="outline_created",
                    timestamp=time.time(),
                    node_name="LLMReasonerNode",
                    status=NodeStatus.COMPLETED,
                    task_id="create_initial_outline",
                    metadata={"outline": outline}
                ))

            if outline:
                self.outline = outline
                self.current_outline_step = 0

                # Store outline in variables for cross-node visibility.
                if self.variable_manager:
                    self.variable_manager.set("reasoning.current_session.outline", outline)

                # Record creation in the rolling reasoning context.
                self.reasoning_context.append({
                    "type": "outline_created",
                    "content": f"Created outline with {len(outline.get('steps', []))} steps",
                    "outline": outline,
                    "timestamp": datetime.now().isoformat()
                })

                return True
            else:
                return False

        except Exception as e:
            # Best-effort: outline creation failure is reported, not raised.
            eprint(f"Failed to create initial outline: {e}")
            return False

    def _parse_outline_from_response(self, response: str) -> dict[str, Any]:
        """Parse structured outline from LLM response"""
        import re

        # Find outline section
        outline_match = re.search(r'OUTLINE_START(.*?)OUTLINE_END', response, re.DOTALL)
        if not outline_match:
            return None

        outline_text = outline_match.group(1).strip()

        # Parse steps
        steps = []
        current_step = None

        for line in outline_text.split('\n'):
            line = line.strip()
            if not line:
                continue

            # New step
            if re.match(r'^Step \d+:', line):
                if current_step:
                    steps.append(current_step)

                current_step = {
                    "description": re.sub(r'^Step \d+:\s*', '', line),
                    "method": "",
                    "expected_outcome": "",
                    "success_criteria": "",
                    "status": "pending"
                }
            elif re.match(r'^Final Step:', line):
                if current_step:
                    steps.append(current_step)

                current_step = {
                    "description": re.sub(r'^Final Step:\s*', '', line),
                    "method": "direct_response",
                    "expected_outcome": "",
                    "success_criteria": "",
                    "status": "pending",
                    "is_final": True
                }
            elif current_step and line.startswith('- Method:'):
                current_step["method"] = line.replace('- Method:', '').strip()
            elif current_step and line.startswith('- Expected outcome:'):
                current_step["expected_outcome"] = line.replace('- Expected outcome:', '').strip()
            elif current_step and line.startswith('- Success criteria:'):
                current_step["success_criteria"] = line.replace('- Success criteria:', '').strip()

        # Add final step if exists
        if current_step:
            steps.append(current_step)

        if not steps:
            return None

        return {
            "steps": steps,
            "created_at": datetime.now().isoformat(),
            "total_steps": len(steps)
        }

    def _build_enhanced_system_context(self, shared) -> str:
        """Build comprehensive system context with variable system info"""
        context_parts = []

        # Enhanced agent capabilities
        available_tools = shared.get("available_tools", [])
        if available_tools:
            context_parts.append(f"Available external tools: {', '.join(available_tools)}")

        # Variable system context
        if self.variable_manager:
            var_info = self.variable_manager.get_scope_info()
            context_parts.append(f"Variable System: {len(var_info)} scopes available")

            # Recent results availability
            results_count = len(self.variable_manager.get("results", {}))
            if results_count:
                context_parts.append(f"Previous results: {results_count} task results available")

        # System state with enhanced details
        tasks = shared.get("tasks", {})
        if tasks:
            active_tasks = len([t for t in tasks.values() if t.status == "running"])
            completed_tasks = len([t for t in tasks.values() if t.status == "completed"])
            context_parts.append(f"Execution state: {active_tasks} active, {completed_tasks} completed tasks")

        # Session context with history
        formatted_context = shared.get("formatted_context", {})
        if formatted_context:
            recent_interaction = formatted_context.get("recent_interaction", "")
            if recent_interaction:
                context_parts.append(f"Recent interaction: {recent_interaction[:100000]}...")

        # Performance history
        if hasattr(self, 'historical_successful_patterns'):
            context_parts.append(
                f"Historical patterns: {len(self.historical_successful_patterns)} successful patterns loaded")

        return "\n".join(context_parts) if context_parts else "Basic system context available"

    async def _manage_context_size(self):
        """Auto-manage context size with intelligent summarization"""
        if len(self.reasoning_context) <= self.context_summary_threshold:
            return

        # Trigger summarization
        if len(self.reasoning_context) >= self.max_context_size:
            # Emergency summarization
            await self._emergency_context_summary()
        elif len(self.reasoning_context) >= self.context_summary_threshold:
            # Regular summarization
            await self._regular_context_summary()

    async def _regular_context_summary(self):
        """Regular context summarization when threshold is reached"""
        # Keep last 10 entries, summarize the rest
        keep_recent = self.reasoning_context[-10:]
        to_summarize = self.reasoning_context[:-10]

        summary = self._create_context_summary(to_summarize, "regular")

        # Replace old context with summary + recent
        self.reasoning_context = [
                                     {
                                         "type": "context_summary",
                                         "content": summary,
                                         "summarized_entries": len(to_summarize),
                                         "summary_type": "regular",
                                         "timestamp": datetime.now().isoformat()
                                     }
                                 ] + keep_recent

    async def _emergency_context_summary(self):
        """Emergency context summarization when max size is reached"""
        # Keep last 5 entries, summarize everything else
        keep_recent = self.reasoning_context[-5:]
        to_summarize = self.reasoning_context[:-5]

        summary = self._create_context_summary(to_summarize, "emergency")

        # Replace with emergency summary
        self.reasoning_context = [
                                     {
                                         "type": "context_summary",
                                         "content": summary,
                                         "summarized_entries": len(to_summarize),
                                         "summary_type": "emergency",
                                         "timestamp": datetime.now().isoformat()
                                     }
                                 ] + keep_recent

    def _create_context_summary(self, entries: list[dict], summary_type: str) -> str:
        """Create intelligent context summary"""
        if not entries:
            return "No context to summarize"

        summary_parts = []

        # Group by type
        by_type = {}
        for entry in entries:
            entry_type = entry.get("type", "unknown")
            if entry_type not in by_type:
                by_type[entry_type] = []
            by_type[entry_type].append(entry)

        # Summarize each type
        for entry_type, type_entries in by_type.items():
            if entry_type == "reasoning":
                reasoning_summary = f"Completed {len(type_entries)} reasoning cycles"
                # Extract key insights
                insights = []
                for entry in type_entries[-3:]:  # Last 3 reasoning entries
                    content = entry.get("content", "")[:1000] + "..."
                    insights.append(content)
                if insights:
                    reasoning_summary += f"\nKey recent reasoning: {'; '.join(insights)}"
                summary_parts.append(reasoning_summary)

            elif entry_type == "meta_tool_result":
                results_summary = f"Executed {len(type_entries)} meta-tool operations"
                # Extract significant results
                significant_results = [
                    entry.get("content", "")[:800]
                    for entry in type_entries
                    if len(entry.get("content", "")) > 50
                ]
                if significant_results:
                    results_summary += f"\nSignificant results: {'; '.join(significant_results[-3:])}"
                summary_parts.append(results_summary)

            else:
                summary_parts.append(f"{entry_type}: {len(type_entries)} entries")

        summary = f"[{summary_type.upper()} SUMMARY] " + "; ".join(summary_parts)

        # Store summary in variables for future reference
        if self.variable_manager:
            summary_data = {
                "type": summary_type,
                "entries_summarized": len(entries),
                "summary": summary,
                "timestamp": datetime.now().isoformat()
            }
            summaries = self.variable_manager.get("reasoning.context_summaries", [])
            summaries.append(summary_data)
            self.variable_manager.set("reasoning.context_summaries", summaries[-10:])  # Keep last 10

        return summary

    def _get_pending_tasks_summary(self) -> str:
        """Get summary of pending tasks requiring attention"""
        if not self.internal_task_stack:
            return "⚠️ NO TASKS IN STACK - You must create tasks from your outline immediately!"

        pending_tasks = [task for task in self.internal_task_stack if task.get("status", "pending") == "pending"]

        if not pending_tasks:
            return "✅ No pending tasks - ready for next outline step or completion"

        task_summaries = []
        for i, task in enumerate(pending_tasks[:3], 1):
            desc = task.get("description", "No description")[:150] + "..." if len(
                task.get("description", "")) > 50 else task.get("description", "")
            step_ref = task.get("outline_step_ref", "")
            step_info = f" ({step_ref})" if step_ref else ""
            task_summaries.append(f"{i}. {desc}{step_info}")

        if len(pending_tasks) > 3:
            task_summaries.append(f"... +{len(pending_tasks) - 3} more pending tasks")

        return f"📋 {len(pending_tasks)} pending tasks:\n" + "\n".join(task_summaries)

    async def _build_outline_driven_prompt(self, prep_res) -> str:
        """Build the outline-driven reasoning prompt with UnifiedContextManager integration.

        Gathers the current stack task, the unified session context (chat
        history, execution state, recent results), various context/stack/
        outline summaries, variable-system hints and loop-detection warnings,
        then interpolates them all into one large instruction prompt for the
        reasoning LLM.

        Args:
            prep_res: Prepared shared state; may contain "context_manager",
                "session_id" and "original_query".

        Returns:
            The fully rendered prompt string.
        """

        # Get current task with enhanced visibility
        current_stack_task = self._get_current_stack_task()

        # Enhanced context from the UnifiedContextManager (session-scoped).
        context_manager = prep_res.get("context_manager")
        session_id = prep_res.get("session_id", "default")

        # Build unified context sections
        unified_context_summary = ""
        recent_results_context = ""

        if context_manager:
            try:
                # Get full unified context
                unified_context = await  context_manager.build_unified_context(session_id, prep_res.get('original_query'))

                unified_context_summary = self._format_unified_context_for_reasoning(unified_context)
                recent_results_context = self._build_recent_results_from_unified_context(unified_context)
            except Exception as e:
                # Best-effort: the prompt still renders with placeholders.
                eprint(f"Failed to get unified context in reasoning prompt: {e}")
                unified_context_summary = "Unified context unavailable"
                recent_results_context = "**No recent results available**"

        # Enhanced context summaries (keeping existing functionality)
        context_summary = self._summarize_reasoning_context()
        task_stack_summary = self._summarize_task_stack()
        outline_status = self._get_current_step_requirements()
        performance_context = self._get_performance_context()

        # Enhanced variable system integration with better suggestions
        variable_context = ""
        variable_suggestions = []
        if self.variable_manager:
            variable_context = self.variable_manager.get_llm_variable_context()
            # Suggestions key off the query plus the current task description.
            query_text = prep_res.get('original_query', '')
            if current_stack_task:
                query_text += " " + current_stack_task.get('description', '')
            variable_suggestions = self.variable_manager.get_variable_suggestions(query_text)

        immediate_context = self._get_immediate_context_for_prompt()
        # Detect if we're in a potential loop situation
        loop_warning = self._generate_loop_warning()

        # NOTE: the template below is consumed verbatim by the LLM — its
        # exact wording (including quirks) is part of runtime behavior and
        # is intentionally left untouched.
        prompt = f"""You are the enhanced strategic reasoning core operating in OUTLINE-DRIVEN MODE with MANDATORY TASK STACK enforcement.
## ABSOLUTE REQUIREMENTS - VIOLATION = IMMEDIATE STOP:
1. **WORK ONLY THROUGH TASK STACK** - No work outside the stack permitted
2. **SEE CURRENT TASK DIRECTLY** - Your current task is shown below
3. **USE VARIABLE SYSTEM** - All results are automatically stored and accessible
4. **USE UNIFIED CONTEXT** - Rich conversation and execution history is available
5. **MARK TASKS COMPLETE** - Every finished task must be marked complete
6. **NO REPEATED ACTIONS** - Check variables first before re-doing work

{loop_warning}

## <CURRENT SITUATION>:
**Original Query:** {prep_res['original_query']}

**Unified Context Summary:**
{unified_context_summary}

**Current Context Summary:**
{context_summary}

**Current Outline Status:**
{outline_status}

** CURRENT TASK FROM STACK:**
{current_stack_task}

**Internal Task Stack:**
{task_stack_summary}

**Performance Metrics:**
{performance_context}

## ENHANCED CONTEXT INTEGRATION:
{variable_context}

** SUGGESTED VARIABLES for current task:**
{', '.join(variable_suggestions[:10]) if variable_suggestions else 'tool_capabilities, query, model_complex, available_tools, timestamp, use_fast_response, tool_registry, name, current_query, current_session'}

** UNIFIED CONTEXT RESULTS ACCESS:**
{recent_results_context}

</CURRENT SITUATION>

## MANDATORY TASK STACK ENFORCEMENT:
**CRITICAL RULE**: You MUST work exclusively through your internal task stack.

**TASK STACK WORKFLOW (MANDATORY):**
1. **CHECK CURRENT TASK**: Your current task is: {current_stack_task.get('description', 'NO CURRENT TASK - ADD TASKS FROM OUTLINE!') if current_stack_task else 'NO CURRENT TASK - VIOLATION!'}

2. **WORK ONLY ON STACK TASKS**: You can ONLY work on tasks that exist in your internal task stack
   - The task you're working on MUST be in the stack with status "pending"
   - Before any action: Verify the task exists in your stack

3. **MANDATORY TASK COMPLETION**: After completing any work, you MUST mark the task as complete
   - Use: META_TOOL_CALL: manage_internal_task_stack(action="complete", task_description="[exact task description]", outline_step_ref="step_X")

4. **CHECK UNIFIED CONTEXT FIRST**: Before any major action, focus your attention to the variable system to see if results already exist
   - Avalabel results are automatically stored in the variable system
   - The unified context above shows available conversation history and execution state

**CURRENT TASK ANALYSIS:**
{self._analyze_current_task(current_stack_task) if current_stack_task else "❌ NO CURRENT TASK - You must add tasks from your outline!"}

## AVAILABLE META-TOOLS:
You have access to these meta-tools to control sub-systems. Use the EXACT syntax shown:

**META_TOOL_CALL: internal_reasoning(thought: str, thought_number: int, total_thoughts: int, next_thought_needed: bool, current_focus: str, key_insights: list[str], potential_issues: list[str], confidence_level: float)**
- Purpose: Structure your thinking process explicitly
- Use for: Any complex analysis, planning, or problem decomposition
- Example: META_TOOL_CALL: internal_reasoning(thought="I need to break this down into steps", thought_number=1, total_thoughts=3, next_thought_needed=true, current_focus="problem analysis", key_insights=["Query requires multiple data sources"], potential_issues=["Data might not be available"], confidence_level=0.8)

**META_TOOL_CALL: manage_internal_task_stack(action: str, task_description: str)**
- Purpose: Manage your high-level to-do list
- Actions: "add", "remove", "complete", "get_current"
- Example: META_TOOL_CALL: manage_internal_task_stack(action="add", task_description="Research competitor analysis data")

**META_TOOL_CALL: delegate_to_llm_tool_node(task_description: str, tools_list: list[str])**
- Purpose: Delegate specific, self-contained tasks requiring external tools
- Use for: Web searches, file operations, API calls, single-, two-, or three-step tool usage
- Example: META_TOOL_CALL: delegate_to_llm_tool_node(task_description="Search for latest news about AI developments", tools_list=["search_web"])
- Rule: always validate delegate_to_llm_tool_node result. will be available in <immediate_context> after execution!

**META_TOOL_CALL: create_and_execute_plan(goals: list[str])**
- Purpose: Handle complex, multi-step projects with dependencies
- Use for: Tasks requiring coordination, parallel execution, or complex workflows
- Example: META_TOOL_CALL: create_and_execute_plan(goals=["Research company A financial data", "Research company B financial data", "Compare {{results.task_1.data}} and {{results.task_2.data}} and create report"])
- Rule: always validate create_and_execute_plan result. will be available in <immediate_context> after execution!

**META_TOOL_CALL: read_from_variables(scope: str, key: str, purpose: str)**
- Unified context data is available in various scopes
- Example: META_TOOL_CALL: read_from_variables(scope="user", key="name", purpose="Gather user information for later reference")

**META_TOOL_CALL: write_to_variables(scope: str, key: str, value: any, description: str)**
- Store important findings immediately
- Example: META_TOOL_CALL: write_to_variables(scope="user", key="name", value="User-Name", description="The users name for later reference")

**META_TOOL_CALL: advance_outline_step(step_completed: bool, completion_evidence: str, next_step_focus: str)**
- Mark outline steps complete when all related tasks done

**META_TOOL_CALL: direct_response(final_answer: str, outline_completion: bool, steps_completed: list[str])**
- ONLY when ALL outline steps complete or no META_TOOL_CALL needed
- final_answer must contain the full final answer for the user with all necessary context and informations ( format in persona style )
- Purpose: End reasoning and provide final answer to user
- Use when: Query is complete or can be answered directly
- Example: META_TOOL_CALL: direct_response(final_answer="Based on my analysis, here are the key findings...")

note: in this interaction only META_TOOL_CALL ar avalabel. for other tools use META_TOOL_CALL: delegate_to_llm_tool_node with the appropriate tool names!

## REASONING STRATEGY:
1. **Start with internal_reasoning** to understand the query and plan approach
2. **Use manage_internal_task_stack** to track high-level steps
3. **Choose the right delegation strategy:**
   - Simple queries → direct_response
   - Up to 3 tool tasks with llm action → delegate_to_llm_tool_node
   - Complex projects → create_and_execute_plan
4. **Monitor progress** and adapt your approach
5. **End with direct_response** when complete

## EXAMPLES OF GOOD REASONING PATTERNS:

**Simple Query Pattern:**
META_TOOL_CALL: internal_reasoning(thought="This is a straightforward question I can answer directly", thought_number=1, total_thoughts=1, next_thought_needed=false, current_focus="direct response", key_insights=["No external data needed"], potential_issues=[], confidence_level=0.9)
META_TOOL_CALL: direct_response(final_answer="...")

**Research Task Pattern:**
META_TOOL_CALL: internal_reasoning(thought="I need to gather information from external sources", ...)
META_TOOL_CALL: manage_internal_task_stack(action="add", task_description="Research topic X")
META_TOOL_CALL: delegate_to_llm_tool_node(task_description="Search for information about X", tools_list=["search_web"])
[Wait for result]
META_TOOL_CALL: internal_reasoning(thought="I have the research data, now I can formulate response", ...)
META_TOOL_CALL: direct_response(final_answer="Based on my research: ...")

**Complex Project Pattern:**
META_TOOL_CALL: internal_reasoning(thought="This requires multiple steps with dependencies", ...)
META_TOOL_CALL: create_and_execute_plan(goals=["Step 1: Gather data A", "Step 2: Gather data B", "Step 3: Analyze A and B together", "Step 4: Create final report"])
[Wait for plan completion]
META_TOOL_CALL: direct_response(final_answer="I've completed your complex request...")

## ENHANCED ANTI-LOOP ENFORCEMENT:
- Current Loop: {self.current_loop_count}/{self.max_reasoning_loops}
- Auto-Recovery Attempts: {getattr(self, 'auto_recovery_attempts', 0)}/{getattr(self, 'max_auto_recovery', 3)}
- Last Actions: {', '.join(getattr(self, 'last_action_signatures', [])[-3:]) if hasattr(self, 'last_action_signatures') else 'None'}

**⚠️ LOOP PREVENTION RULES:**
1. If you just read a variable, DO NOT read the same variable again
2. If you completed a task, DO NOT repeat the same work
3. If results exist in unified context, DO NOT recreate them
4. Always advance to next logical step

{self._get_current_step_requirements()}

## YOUR NEXT ACTION (Choose ONE):
Based on your current task, unified context, and available variables, what is your next concrete action?

**DECISION TREE:**
1. ❓ No current task? → Add tasks from outline
2. 📖 Current task needs data? → Check variables and unified context first (read_from_variables)
3. 🔧 Need to execute tools and reason over up to 3 steps? → Use delegate_to_llm_tool_node
4. ✅ Task complete? → Mark complete and advance
5. 🎯 All outline done? → Provide direct_response

Latest unified context: (note delegation results could be wrong or misleading)
<immediate_context>
{immediate_context}
</immediate_context>

must validate <immediate_context> output!
- validate the <immediate_context> output! before proceeding with the outline!
- output compleat fail -> direct_response
- informations missing or output recovery needed -> repeat step with a different strategy
- not enough structure -> use create_and_execute_plan meta-tool call
- output is valid -> continue with the outline!
- if dynamic Planing is needed, you must use the appropriate meta-tool call

**Remember**:
- work step by step max call 3 meta-tool calls in one run.
- only use direct_response if the outline is complete and context from <immediate_context> is enough to answer the query!
- Your job is to work systematically through your outline using your task stack, while leveraging the unified context system to avoid duplicate work and maintain context."""

        return prompt

    def _format_unified_context_for_reasoning(self, unified_context: dict[str, Any]) -> str:
        """Format unified context für reasoning prompt"""
        try:
            context_parts = []

            # Session info
            session_stats = unified_context.get('session_stats', {})
            context_parts.append(
                f"Session: {unified_context.get('session_id', 'unknown')} with {session_stats.get('current_session_length', 0)} messages")

            # Chat history summary
            chat_history = unified_context.get('chat_history', [])
            if chat_history:
                recent_messages = len([msg for msg in chat_history if msg.get('role') == 'user'])
                context_parts.append(f"Conversation: {recent_messages} user queries in current context")

                # Show last user message for reference
                last_user_msg = None
                for msg in reversed(chat_history):
                    if msg.get('role') == 'user':
                        last_user_msg = msg.get('content', '')[:100] + "..."
                        break
                if last_user_msg:
                    context_parts.append(f"Latest user query: {last_user_msg}")

            # Execution state
            execution_state = unified_context.get('execution_state', {})
            active_tasks = execution_state.get('active_tasks', [])
            recent_completions = execution_state.get('recent_completions', [])
            if active_tasks or recent_completions:
                context_parts.append(
                    f"Execution: {len(active_tasks)} active, {len(recent_completions)} completed tasks")

            # Available data
            variables = unified_context.get('variables', {})
            recent_results = variables.get('recent_results', [])
            if recent_results:
                context_parts.append(f"Available Results: {len(recent_results)} recent task results accessible")

            return "\n".join(context_parts)

        except Exception as e:
            return f"Error formatting unified context: {str(e)}"

    def _build_recent_results_from_unified_context(self, unified_context: dict[str, Any]) -> str:
        """Build recent results context from unified context"""
        try:
            variables = unified_context.get('variables', {})
            recent_results = variables.get('recent_results', [])

            if not recent_results:
                return "**No recent results available from unified context**"

            result_context = """**🔍 RECENT RESULTS FROM UNIFIED CONTEXT:**"""

            for i, result in enumerate(recent_results[:3], 1):  # Top 3 results
                task_id = result.get('task_id', f'result_{i}')
                preview = result.get('preview', 'No preview')
                success = result.get('success', False)
                status_icon = "✅" if success else "❌"

                result_context += f"\n{status_icon} {task_id}: {preview}"

            result_context += "\n\n**Quick Access Keys Available:**"
            result_context += "\n- Use read_from_variables(scope='results', key='task_id.data') for specific results"
            result_context += "\n- Check delegation.latest for most recent delegation results"

            return result_context

        except Exception as e:
            return f"**Error accessing recent results: {str(e)}**"

    def _generate_loop_warning(self) -> str:
        """Generate loop warning if repetitive behavior detected"""
        if len(self.last_action_signatures) >= 3:
            recent_actions = self.last_action_signatures[-3:]
            if len(set(recent_actions)) <= 2:
                return """
⚠️ **LOOP WARNING DETECTED** ⚠️
You are repeating similar actions. MUST change approach:
- If you just read variables, act on the results
- If you delegated tasks, check the results
- Complete current task and advance to next step
- DO NOT repeat the same meta-tool calls
    """
        return ""

    def _get_current_stack_task(self) -> dict[str, Any]:
        """Get current pending task from stack for direct visibility"""
        if not self.internal_task_stack:
            return {}

        pending_tasks = [task for task in self.internal_task_stack if task.get("status", "pending") == "pending"]
        if pending_tasks:
            current_task = pending_tasks[0]  # Get first pending task
            return {
                "description": current_task.get("description", ""),
                "outline_step_ref": current_task.get("outline_step_ref", ""),
                "status": current_task.get("status", "pending"),
                "added_at": current_task.get("added_at", ""),
                "task_index": self.internal_task_stack.index(current_task) + 1,
                "total_tasks": len(self.internal_task_stack)
            }

        return {}

    def _analyze_current_task(self, current_task: dict[str, Any]) -> str:
        """Analyze current task and provide guidance"""
        if not current_task:
            return "❌ NO CURRENT TASK - Add tasks from your outline immediately!"

        description = current_task.get("description", "")
        outline_ref = current_task.get("outline_step_ref", "")

        analysis = f"""CURRENT TASK IDENTIFIED:
Task: {description}
Outline Reference: {outline_ref}
Position: {current_task.get('task_index', '?')}/{current_task.get('total_tasks', '?')}

RECOMMENDED ACTION:"""

        # Analyze task content for recommendations
        if "read" in description.lower() or "file" in description.lower():
            analysis += "\n1. Check if file content already exists in variables (read_from_variables)"
            analysis += "\n2. If not found, use delegate_to_llm_tool_node with read_file tool"
        elif "write" in description.lower() or "create" in description.lower():
            analysis += "\n1. Check if content is ready in variables"
            analysis += "\n2. Use delegate_to_llm_tool_node with write_file tool"
        elif "analyze" in description.lower() or "question" in description.lower():
            analysis += "\n1. Read existing data from variables"
            analysis += "\n2. Process the information and provide direct_response"
        else:
            analysis += "\n1. Break down the task into specific actions"
            analysis += "\n2. Verify last Task Delegation results"

        return analysis

    def _get_immediate_context_for_prompt(self) -> str:
        """Get immediate context additions from recent meta-tool executions"""
        recent_results = [
            entry for entry in self.reasoning_context[-5:]  # Last 5 entries
            if entry.get("type") == "meta_tool_result"
        ]

        if not recent_results:
            return "No recent meta-tool results"

        context_parts = ["📊 IMMEDIATE CONTEXT FROM RECENT ACTIONS:"]

        for result in recent_results:
            meta_tool = result.get("meta_tool", "unknown")
            content = result.get("content", "")
            loop = result.get("loop", "?")

            # Format based on meta-tool type
            if meta_tool == "delegate_to_llm_tool_node":
                context_parts.append(f"✅ DELEGATION RESULT (Loop {loop}):")
                context_parts.append(f"   {content}")
            elif meta_tool == "read_from_variables":
                context_parts.append(f"📖 VARIABLE READ (Loop {loop}):")
                context_parts.append(f"   {content}")
            elif meta_tool == "manage_internal_task_stack":
                context_parts.append(f"📋 TASK UPDATE (Loop {loop}):")
                context_parts.append(f"   {content}")
            else:
                context_parts.append(f"🔧 {meta_tool.upper()} (Loop {loop}):")
                context_parts.append(f"   {content}")

        return "\n".join(context_parts)

    def _summarize_reasoning_context(self) -> str:
        """Enhanced reasoning context summary with immediate result visibility"""
        if not self.reasoning_context:
            return "No previous reasoning steps"

        # Separate different types of context entries
        reasoning_entries = []
        meta_tool_results = []
        errors = []

        for entry in self.reasoning_context:
            entry_type = entry.get("type", "unknown")

            if entry_type == "reasoning":
                reasoning_entries.append(entry)
            elif entry_type == "meta_tool_result":
                meta_tool_results.append(entry)
            elif entry_type == "error":
                errors.append(entry)

        summary_parts = []

        # Show recent meta-tool results FIRST for immediate visibility
        if meta_tool_results:
            summary_parts.append("🔍 RECENT RESULTS:")
            for result in meta_tool_results[-3:]:  # Last 3 results
                meta_tool = result.get("meta_tool", "unknown")
                content = result.get("content", "")[:3000] + "..."
                loop = result.get("loop", "?")
                summary_parts.append(f"  [{meta_tool}] Loop {loop}: {content}")

        # Show reasoning summary
        if reasoning_entries:
            summary_parts.append(f"\n💭 REASONING: {len(reasoning_entries)} reasoning cycles completed")

        # Show errors if any
        if errors:
            summary_parts.append(f"\n⚠️ ERRORS: {len(errors)} errors encountered")
            for error in errors[-2:]:  # Last 2 errors
                content = error.get("content", "")[:1500]
                summary_parts.append(f"  Error: {content}")

        return "\n".join(summary_parts)

    def _get_current_step_requirements(self) -> str:
        """Get requirements for current outline step"""
        if not self.outline or not self.outline.get("steps"):
            return "ERROR: No outline available"

        steps = self.outline["steps"]
        if self.current_outline_step >= len(steps):
            return "All outline steps completed - must provide final response"

        current_step = steps[self.current_outline_step]

        requirements = f"""CURRENT STEP FOCUS:
Description: {current_step.get('description', 'Unknown')}
Required Method: {current_step.get('method', 'Unknown')}
Expected Outcome: {current_step.get('expected_outcome', 'Unknown')}
Success Criteria: {current_step.get('success_criteria', 'Unknown')}
Current Status: {current_step.get('status', 'pending')}

You MUST use the specified method and achieve the expected outcome before advancing."""

        return requirements

    def _get_performance_context(self) -> str:
        """Get performance context with accurate metrics"""
        if not self.performance_metrics:
            return "No performance metrics available"

        metrics_parts = []

        # Core metrics
        avg_time = self.performance_metrics.get("avg_loop_time", 0)
        efficiency = self.performance_metrics.get("action_efficiency", 0)
        total_loops = self.performance_metrics.get("total_loops", 0)
        progress_loops = self.performance_metrics.get("progress_loops", 0)

        metrics_parts.append(f"Avg Loop Time: {avg_time:.2f}s")
        metrics_parts.append(f"Progress Rate: {efficiency:.1%}")
        metrics_parts.append(f"Action Efficiency: {efficiency:.1%}")

        # Performance warnings
        if total_loops > 3 and efficiency < 0.5:
            metrics_parts.append("⚠️ LOW EFFICIENCY - Need more progress actions")
        elif total_loops > 5 and efficiency < 0.3:
            metrics_parts.append("🔴 VERY LOW EFFICIENCY - Review approach")

        # Loop detection warning based on actual metrics
        if len(self.last_action_signatures) > 3:
            unique_recent = len(set(self.last_action_signatures[-3:]))
            if unique_recent <= 1:
                metrics_parts.append("⚠️ LOOP PATTERN DETECTED - Change approach required")

        return "; ".join(metrics_parts)

    def _track_action_type(self, action_type: str, success: bool = True):
        """Track specific action types for detailed performance analysis"""
        if not hasattr(self, 'action_tracking'):
            self.action_tracking = {}

        if action_type not in self.action_tracking:
            self.action_tracking[action_type] = {"total": 0, "successful": 0}

        self.action_tracking[action_type]["total"] += 1
        if success:
            self.action_tracking[action_type]["successful"] += 1

        # Update overall action efficiency based on all action types
        total_actions = sum(stats["total"] for stats in self.action_tracking.values())
        successful_actions = sum(stats["successful"] for stats in self.action_tracking.values())

        if total_actions > 0:
            self.performance_metrics["detailed_action_efficiency"] = successful_actions / total_actions


    def _detect_infinite_loop(self) -> bool:
        """Enhanced infinite loop detection with multiple patterns"""
        if len(self.last_action_signatures) < 3:
            return False

        # 1. Immediate repetition (same action 3+ times)
        recent_actions = self.last_action_signatures[-3:]
        if len(set(recent_actions)) == 1:
            return True

        # 2. Pattern repetition (AB-AB-AB pattern)
        if len(self.last_action_signatures) >= 6:
            pattern1 = self.last_action_signatures[-6:-3]
            pattern2 = self.last_action_signatures[-3:]
            if pattern1 == pattern2:
                return True

        # 3. Variable read loops (multiple reads of same variable)
        variable_reads = [sig for sig in self.last_action_signatures if sig.startswith("read_from_variables")]
        if len(variable_reads) >= 3:
            # Extract variable signatures from recent reads
            recent_var_reads = variable_reads[-3:]
            if len(set(recent_var_reads)) <= 2:  # Repeated variable reads
                return True

        # 4. No outline progress for extended loops
        if self.current_loop_count > 5:
            if not hasattr(self, '_last_step_progress_loop'):
                self._last_step_progress_loop = {}

            last_progress = self._last_step_progress_loop.get(self.current_outline_step, 0)
            if self.current_loop_count - last_progress > 4:  # No step progress for 4+ loops
                return True

        # 5. Same task stack state for multiple loops
        if hasattr(self, '_task_stack_states'):
            stack_signature = hash(
                str([(t.get('status'), t.get('description')[:20]) for t in self.internal_task_stack]))
            if stack_signature in self._task_stack_states:
                repetitions = self._task_stack_states[stack_signature]
                if repetitions >= 4:
                    return True
                self._task_stack_states[stack_signature] = repetitions + 1
            else:
                self._task_stack_states[stack_signature] = 1
        else:
            self._task_stack_states = {}

        return False

    async def _trigger_auto_recovery(self, prep_res):
        """Trigger auto-recovery mechanism"""
        self.auto_recovery_attempts += 1

        # Store failure pattern
        if self.variable_manager:
            failure_data = {
                "timestamp": datetime.now().isoformat(),
                "loop_count": self.current_loop_count,
                "outline_step": self.current_outline_step,
                "last_actions": self.last_action_signatures[-5:],
                "recovery_attempt": self.auto_recovery_attempts
            }
            failures = self.variable_manager.get("reasoning.failure_patterns", [])
            failures.append(failure_data)
            self.variable_manager.set("reasoning.failure_patterns", failures[-20:])  # Keep last 20

        # Recovery strategies
        if self.auto_recovery_attempts == 1:
            # Force outline step advancement
            await self._force_outline_advancement(prep_res)
        elif self.auto_recovery_attempts == 2:
            # Skip current step and move to next
            await self._emergency_step_skip(prep_res)
        else:
            # Final emergency: force completion
            await self._emergency_completion(prep_res)

    async def _force_outline_advancement(self, prep_res):
        """Force advancement to next outline step"""
        if self.outline and self.current_outline_step < len(self.outline["steps"]):
            current_step = self.outline["steps"][self.current_outline_step]
            current_step["status"] = "force_completed"
            current_step["completion_method"] = "auto_recovery"

            self.current_outline_step += 1

            # Add to context
            self.reasoning_context.append({
                "type": "auto_recovery",
                "content": f"Force advanced to step {self.current_outline_step + 1} due to loop detection",
                "recovery_attempt": self.auto_recovery_attempts,
                "timestamp": datetime.now().isoformat()
            })

    async def _emergency_step_skip(self, prep_res):
        """Emergency skip of problematic step"""
        if self.outline and self.current_outline_step < len(self.outline["steps"]) - 1:
            current_step = self.outline["steps"][self.current_outline_step]
            current_step["status"] = "emergency_skipped"
            current_step["skip_reason"] = "loop_recovery"

            self.current_outline_step += 1

            # Add to context
            self.reasoning_context.append({
                "type": "emergency_skip",
                "content": f"Emergency skipped step {self.current_outline_step} and advanced to step {self.current_outline_step + 1}",
                "recovery_attempt": self.auto_recovery_attempts,
                "timestamp": datetime.now().isoformat()
            })

    async def _emergency_completion(self, prep_res):
        """Emergency completion of reasoning"""
        # Mark all remaining steps as emergency completed
        if self.outline:
            for i in range(self.current_outline_step, len(self.outline["steps"])):
                self.outline["steps"][i]["status"] = "emergency_completed"

            self.current_outline_step = len(self.outline["steps"])

        # Add to context
        self.reasoning_context.append({
            "type": "emergency_completion",
            "content": "Emergency completion triggered due to excessive recovery attempts",
            "recovery_attempt": self.auto_recovery_attempts,
            "timestamp": datetime.now().isoformat()
        })

    def _update_performance_metrics(self, loop_start_time: float, progress_made: bool):
        """Update performance metrics with accurate action efficiency tracking"""
        loop_duration = time.time() - loop_start_time

        # Initialize metrics if needed
        if not hasattr(self, 'performance_metrics') or not self.performance_metrics:
            self.performance_metrics = {
                "loop_times": [],
                "progress_loops": 0,
                "total_loops": 0
            }

        # Update core metrics
        self.performance_metrics["loop_times"].append(loop_duration)
        self.performance_metrics["total_loops"] += 1

        if progress_made:
            self.performance_metrics["progress_loops"] += 1

        # Calculate derived metrics
        total = self.performance_metrics["total_loops"]
        progress = self.performance_metrics["progress_loops"]

        self.performance_metrics["avg_loop_time"] = sum(self.performance_metrics["loop_times"]) / len(
            self.performance_metrics["loop_times"])
        self.performance_metrics["action_efficiency"] = progress / total if total > 0 else 0.0
        self.performance_metrics["progress_rate"] = self.performance_metrics["action_efficiency"]  # Same metric

        # Keep only recent loop times for memory efficiency
        if len(self.performance_metrics["loop_times"]) > 10:
            self.performance_metrics["loop_times"] = self.performance_metrics["loop_times"][-10:]

    def _add_context_to_reasoning(self, context_addition: str, meta_tool_name: str,
                                  execution_details: dict = None) -> None:
        """Add context addition to reasoning context for immediate visibility in next LLM prompt"""
        if not context_addition:
            return

        # Create structured context entry
        context_entry = {
            "type": "meta_tool_result",
            "content": context_addition,
            "meta_tool": meta_tool_name,
            "loop": self.current_loop_count,
            "outline_step": getattr(self, 'current_outline_step', 0),
            "timestamp": datetime.now().isoformat()
        }

        # Add execution details if provided
        if execution_details:
            context_entry["execution_details"] = {
                "duration": execution_details.get("execution_duration", 0),
                "success": execution_details.get("execution_success", False),
                "tool_category": execution_details.get("tool_category", "unknown")
            }

        # Add to reasoning context for immediate visibility
        self.reasoning_context.append(context_entry)

        # Store in variables for persistent access
        if self.agent_instance:
            if not self.agent_instance.shared.get("system_context"):
                self.agent_instance.shared["system_context"] = {}
            if not self.agent_instance.shared["system_context"].get("reasoning_context"):
                self.agent_instance.shared["system_context"]["reasoning_context"] = {}

            result_key = f"reasoning.loop_{self.current_loop_count}_{meta_tool_name}"
            self.agent_instance.shared["system_context"]["reasoning_context"][result_key] = {
                "context_addition": context_addition,
                "meta_tool": meta_tool_name,
                "timestamp": datetime.now().isoformat(),
                "loop": self.current_loop_count
            }

    async def _parse_and_execute_meta_tools(self, llm_response: str, prep_res: dict) -> dict[str, Any]:
        """Enhanced meta-tool parsing with comprehensive progress tracking"""

        result = {
            "final_result": None,
            "action_taken": None,
            "progress_made": False,
            "context_addition": None
        }

        progress_tracker = prep_res.get("progress_tracker")
        session_id = prep_res.get("session_id")

        # Pattern to match META_TOOL_CALL: tool_name(args...)
        pattern = r'META_TOOL_CALL:'
        matches = _extract_meta_tool_calls(llm_response, pattern)

        if not matches and progress_tracker:
            # No meta-tools found in response
            await progress_tracker.emit_event(ProgressEvent(
                event_type="meta_tool_analysis",
                node_name="LLMReasonerNode",
                session_id=session_id,
                status=NodeStatus.COMPLETED,
                success=True,  # Die Analyse selbst war erfolgreich
                node_phase="analysis_complete",  # Verwendung des dedizierten Feldes
                llm_output=llm_response,  # Speichert die vollständige analysierte Antwort
                metadata={
                    "analysis_result": "no_meta_tools_detected",
                    "reasoning_loop": self.current_loop_count,
                    "outline_step": self.current_outline_step if hasattr(self, 'current_outline_step') else 0,
                    "context_size": len(self.reasoning_context),
                    "performance_warning": len(self.reasoning_context) > 10 and self.current_loop_count > 5
                }
            ))
            result["context_addition"] = "No action taken - this violates outline-driven execution requirements"
            self._add_context_to_reasoning(result["context_addition"], "invalid", {})

            return result

        for i, (tool_name, args_str) in enumerate(matches):
            meta_tool_start = time.perf_counter()

            # Track action signature for loop detection
            action_signature = f"{tool_name}:{hash(args_str) % 1000}"
            self.last_action_signatures.append(action_signature)
            if len(self.last_action_signatures) > 10:
                self.last_action_signatures = self.last_action_signatures[-10:]

            try:
                # Parse arguments with enhanced error handling
                args = _parse_tool_args(args_str)
                if progress_tracker:
                    await progress_tracker.emit_event(ProgressEvent(
                        event_type="tool_call",  # Vereinheitlicht auf "tool_call"
                        node_name="LLMReasonerNode",
                        session_id=session_id,
                        status=NodeStatus.RUNNING,
                        tool_name=tool_name,
                        is_meta_tool=True,  # Klares Flag für Meta-Tools
                        tool_args=args,
                        task_id=f"meta_tool_{tool_name}_{i + 1}",
                        metadata={
                            "reasoning_loop": self.current_loop_count,
                            "outline_step": self.current_outline_step if hasattr(self, 'current_outline_step') else 0
                        }
                    ))
                rprint(f"Parsed args: {args}")

                # Execute meta-tool with detailed tracking
                meta_result = None
                execution_details = {
                    "meta_tool_name": tool_name,
                    "parsed_args": args,
                    "execution_success": False,
                    "execution_duration": 0.0,
                    "reasoning_loop": self.current_loop_count,
                    "outline_step": self.current_outline_step if hasattr(self, 'current_outline_step') else 0,
                    "context_before_size": len(self.reasoning_context),
                    "task_stack_before_size": len(self.internal_task_stack),
                    "tool_category": self._get_tool_category(tool_name),
                    "execution_phase": "executing"
                }

                if tool_name == "internal_reasoning":
                    meta_result = await self._execute_enhanced_internal_reasoning(args, prep_res)
                    execution_details.update({
                        "thought_number": args.get("thought_number", 1),
                        "total_thoughts": args.get("total_thoughts", 1),
                        "current_focus": args.get("current_focus", ""),
                        "confidence_level": args.get("confidence_level", 0.5),
                        "key_insights": args.get("key_insights", []),
                        "key_insights_count": len(args.get("key_insights", [])),
                        "potential_issues_count": len(args.get("potential_issues", [])),
                        "next_thought_needed": args.get("next_thought_needed", False),
                        "internal_reasoning_log_size": len(getattr(self, 'internal_reasoning_log', [])),
                        "reasoning_depth": self._calculate_reasoning_depth(),
                        "outline_step_progress": args.get("outline_step_progress", "")
                    })
                    result["action_taken"] = False

                elif tool_name == "manage_internal_task_stack":
                    meta_result = await self._execute_enhanced_task_stack(args, prep_res)
                    execution_details.update({
                        "stack_action": args.get("action", "unknown"),
                        "task_description": args.get("task_description", ""),
                        "outline_step_ref": args.get("outline_step_ref", ""),
                        "stack_size_before": len(self.internal_task_stack),
                        "stack_size_after": 0  # Will be updated below
                    })
                    execution_details["stack_size_after"] = len(self.internal_task_stack)
                    execution_details["stack_change"] = execution_details["stack_size_after"] - execution_details[
                        "stack_size_before"]
                    result["action_taken"] = True

                elif tool_name == "delegate_to_llm_tool_node":
                    meta_result = await self._execute_enhanced_delegate_llm_tool(args, prep_res)
                    execution_details.update({
                        "delegated_task_description": args.get("task_description", ""),
                        "tools_list": args.get("tools_list", []),
                        "tools_count": len(args.get("tools_list", [])),
                        "delegation_target": "LLMToolNode",
                        "sub_system_execution": True,
                        "delegation_complexity": self._assess_delegation_complexity(args),
                        "outline_step_completion": args.get("outline_step_completion", False)
                    })
                    result["action_taken"] = True
                    result["progress_made"] = True

                elif tool_name == "create_and_execute_plan":
                    meta_result = await self._execute_enhanced_create_plan(args, prep_res)
                    execution_details.update({
                        "goals_list": args.get("goals", []),
                        "goals_count": len(args.get("goals", [])),
                        "plan_execution_target": "TaskPlanner_TaskExecutor",
                        "sub_system_execution": True,
                        "complex_workflow": True,
                        "estimated_complexity": self._estimate_plan_complexity(args.get("goals", [])),
                        "outline_step_completion": args.get("outline_step_completion", False)
                    })
                    result["action_taken"] = True
                    result["progress_made"] = True

                elif tool_name == "advance_outline_step":
                    meta_result = await self._execute_advance_outline_step(args, prep_res)
                    execution_details.update({
                        "step_completed": args.get("step_completed", False),
                        "completion_evidence": args.get("completion_evidence", ""),
                        "next_step_focus": args.get("next_step_focus", ""),
                        "outline_advancement": True,
                        "step_progression": f"{self.current_outline_step}/{len(self.outline.get('steps', [])) if self.outline else 0}"
                    })
                    result["action_taken"] = True
                    result["progress_made"] = True

                elif tool_name == "write_to_variables":
                    meta_result = await self._execute_write_to_variables(args)
                    execution_details.update({
                        "variable_scope": args.get("scope", "reasoning"),
                        "variable_key": args.get("key", ""),
                        "variable_description": args.get("description", ""),
                        "data_persistence": True,
                        "variable_system_operation": "write"
                    })
                    result["action_taken"] = True

                elif tool_name == "read_from_variables":
                    meta_result = await self._execute_read_from_variables(args)
                    execution_details.update({
                        "variable_scope": args.get("scope", "reasoning"),
                        "variable_key": args.get("key", ""),
                        "read_purpose": args.get("purpose", ""),
                        "variable_system_operation": "read",
                        "data_retrieval": True
                    })
                    result["action_taken"] = True

                elif tool_name == "direct_response":

                    final_answer = args.get("final_answer", "Task completed.").replace('\\n', '\n').replace('\\t', '\t')
                    execution_details.update({
                        "final_answer": final_answer,
                        "final_answer_length": len(final_answer),
                        "reasoning_complete": True,
                        "flow_termination": True,
                        "reasoning_summary": self._create_reasoning_summary(),
                        "total_reasoning_steps": len(self.reasoning_context),
                        "outline_completion": True,
                        "steps_completed": args.get("steps_completed", []),
                        "session_completion": True
                    })

                    completion_context = f"✅ REASONING COMPLETE: {final_answer}"
                    self._add_context_to_reasoning(completion_context, tool_name, execution_details)

                    # Store successful completion
                    await self._store_successful_completion(prep_res, final_answer)

                    if progress_tracker:
                        meta_tool_duration = time.perf_counter() - meta_tool_start
                        execution_details["execution_duration"] = meta_tool_duration
                        execution_details["execution_success"] = True

                        await progress_tracker.emit_event(ProgressEvent(
                            event_type="meta_tool_call",
                            timestamp=time.time(),
                            node_name="LLMReasonerNode",
                            status=NodeStatus.COMPLETED,
                            session_id=session_id,
                            task_id=f"meta_tool_{tool_name}_{i + 1}",
                            node_duration=meta_tool_duration,
                            success=True,
                            metadata=execution_details
                        ))

                    result["final_result"] = final_answer
                    result["action_taken"] = True
                    result["progress_made"] = True
                    return result

                # test if tool name is in agent tools if so try to run it
                elif tool_name in self.agent_instance.tool_registry:
                    meta_result = await self.agent_instance.arun_function(tool_name, **args)
                    result["action_taken"] = True
                    result["progress_made"] = True
                    execution_details.update({
                        "tool_name": tool_name,
                        "tool_args": args,
                        "tool_result": meta_result
                    })

                else:
                    execution_details.update({
                        "error_type": "unknown_meta_tool",
                        "error_message": f"Unknown meta-tool: {tool_name}",
                        "execution_success": False,
                        "available_meta_tools": ["internal_reasoning", "manage_internal_task_stack",
                                            "delegate_to_llm_tool_node", "create_and_execute_plan",
                                            "advance_outline_step", "write_to_variables", "read_from_variables",
                                            "direct_response"]
                    })

                    if progress_tracker:
                        meta_tool_duration = time.perf_counter() - meta_tool_start
                        await progress_tracker.emit_event(ProgressEvent(
                            event_type="meta_tool_call",
                            timestamp=time.time(),
                            node_name="LLMReasonerNode",
                            status=NodeStatus.FAILED,
                            session_id=session_id,
                            task_id=f"meta_tool_{tool_name}_{i + 1}",
                            node_duration=meta_tool_duration,
                            success=False,
                            metadata=execution_details
                        ))

                    error_context = f"❌ Unknown meta-tool: {tool_name}"
                    self._add_context_to_reasoning(error_context, tool_name, execution_details)
                    wprint(f"Unknown meta-tool: {tool_name}")
                    continue

                # Update execution details with results
                meta_tool_duration = time.perf_counter() - meta_tool_start
                execution_details.update({
                    "execution_duration": meta_tool_duration,
                    "execution_success": True,
                    "context_after_size": len(self.reasoning_context),
                    "task_stack_after_size": len(self.internal_task_stack),
                    "performance_score": self._calculate_tool_performance_score(meta_tool_duration, tool_name),
                    "execution_phase": "completed"
                })
                self._track_action_type(tool_name, success=True)

                # Add result to context
                if meta_result and meta_result.get("context_addition"):
                    result["context_addition"] = meta_result["context_addition"]
                    execution_details["context_addition_length"] = len(meta_result["context_addition"])

                    self._add_context_to_reasoning(meta_result["context_addition"], tool_name, execution_details)

                # Emit success event
                if progress_tracker:
                    await progress_tracker.emit_event(ProgressEvent(
                        event_type="meta_tool_call",
                        timestamp=time.time(),
                        node_name="LLMReasonerNode",
                        status=NodeStatus.COMPLETED,
                        session_id=session_id,
                        task_id=f"meta_tool_{tool_name}_{i + 1}",
                        node_duration=meta_tool_duration,
                        success=True,
                        metadata=execution_details
                    ))

            except Exception as e:
                meta_tool_duration = time.perf_counter() - meta_tool_start
                error_details = {
                    "meta_tool_name": tool_name,
                    "execution_success": False,
                    "execution_duration": meta_tool_duration,
                    "error_type": type(e).__name__,
                    "error_message": str(e),
                    "reasoning_loop": self.current_loop_count,
                    "outline_step": self.current_outline_step if hasattr(self, 'current_outline_step') else 0,
                    "parsed_args": args if 'args' in locals() else None,
                    "raw_args_string": args_str,
                    "execution_phase": "meta_tool_error",
                    "context_size_at_error": len(self.reasoning_context),
                    "task_stack_size_at_error": len(self.internal_task_stack),
                    "tool_category": self._get_tool_category(tool_name),
                    "error_context": self._get_error_context(e),
                    "recovery_recommended": self.auto_recovery_attempts < getattr(self, 'max_auto_recovery', 3)
                }

                if progress_tracker:
                    await progress_tracker.emit_event(ProgressEvent(
                        event_type="meta_tool_call",
                        timestamp=time.time(),
                        node_name="LLMReasonerNode",
                        status=NodeStatus.FAILED,
                        session_id=session_id,
                        task_id=f"meta_tool_{tool_name}_{i + 1}",
                        node_duration=meta_tool_duration,
                        success=False,
                        metadata=error_details
                    ))

                eprint(f"Meta-tool execution failed for {tool_name}: {e}")
                result["context_addition"] = f"Error executing {tool_name}: {str(e)}"

                self._add_context_to_reasoning(result["context_addition"], tool_name, execution_details)

        # Final summary event if multiple meta-tools were processed
        if len(matches) > 1 and progress_tracker:
            batch_performance = self._calculate_batch_performance(matches)
            reasoning_progress = self._assess_reasoning_progress()

            await progress_tracker.emit_event(ProgressEvent(
                event_type="meta_tool_batch_complete",
                timestamp=time.time(),
                node_name="LLMReasonerNode",
                status=NodeStatus.COMPLETED,
                session_id=session_id,
                metadata={
                    "total_meta_tools_processed": len(matches),
                    "reasoning_loop": self.current_loop_count,
                    "outline_step": self.current_outline_step if hasattr(self, 'current_outline_step') else 0,
                    "batch_execution_complete": True,
                    "final_context_size": len(self.reasoning_context),
                    "final_task_stack_size": len(self.internal_task_stack),
                    "meta_tools_executed": [match[0] for match in matches],
                    "execution_phase": "meta_tool_batch_summary",
                    "batch_performance": batch_performance,
                    "reasoning_progress": reasoning_progress,
                    "progress_made": result["progress_made"],
                    "action_taken": result["action_taken"],
                    "outline_status": {
                        "current_step": self.current_outline_step if hasattr(self, 'current_outline_step') else 0,
                        "total_steps": len(self.outline.get('steps', [])) if self.outline else 0,
                        "completion_ratio": (
                                self.current_outline_step / len(self.outline.get('steps', [1]))) if self.outline else 0
                    },
                    "performance_summary": {
                        "loop_efficiency": self.performance_metrics.get("action_efficiency", 0) if hasattr(self,
                                                                                                           'performance_metrics') else 0,
                        "recovery_attempts": getattr(self, 'auto_recovery_attempts', 0),
                        "context_management_active": len(self.reasoning_context) >= getattr(self,
                                                                                            'context_summary_threshold',
                                                                                            15)
                    }
                }
            ))

        return result

    async def _execute_enhanced_internal_reasoning(self, args: dict, prep_res: dict) -> dict[str, Any]:
        """Enhanced internal reasoning with outline step tracking"""
        # Standard internal reasoning execution
        result = await self._execute_internal_reasoning(args, prep_res)

        # Enhanced with outline step progress
        outline_step_progress = args.get("outline_step_progress", "")
        if outline_step_progress and result:
            result["context_addition"] += f"\nOutline Step Progress: {outline_step_progress}"

        # Track reasoning depth for current step
        if not hasattr(self, '_step_reasoning_depth'):
            self._step_reasoning_depth = {}

        current_step = self.current_outline_step
        self._step_reasoning_depth[current_step] = self._step_reasoning_depth.get(current_step, 0) + 1

        # Warn if too much reasoning without action
        if self._step_reasoning_depth[current_step] > 3:
            result["context_addition"] += "\n⚠️ WARNING: Too much reasoning without concrete action for current step"

        return result

    async def _execute_enhanced_task_stack(self, args: dict, prep_res: dict) -> dict[str, Any]:
        """Enhanced task stack management with outline step tracking"""
        # Get outline step reference
        outline_step_ref = args.get("outline_step_ref", f"step_{self.current_outline_step}")

        # Execute standard task stack management
        result = await self._execute_manage_task_stack(args, prep_res)

        # Enhanced with outline step reference
        if result:
            result["context_addition"] += f"\n[Linked to: {outline_step_ref}]"

        return result

    async def _execute_enhanced_delegate_llm_tool(self, args: dict, prep_res: dict) -> dict[str, Any]:
        """Delegate a task to an LLMToolNode and persist its results for later access.

        Builds a shared-state dict for the sub-node, runs it, reads back whatever
        the node wrote ("current_response", "tool_calls_made", "results"), and
        stores the combined delegation record under several variable-system keys
        (per-loop, "latest", per-result data keys, delegation index) so later
        reasoning loops can find it. On failure, the error is stored under
        ``delegation.error.loop_<n>`` and reported in the returned context.

        Args:
            args: Meta-tool args; uses "task_description", "tools_list", and the
                "outline_step_completion" flag.
            prep_res: Shared prep state (variable manager, agent instance, model
                names, progress tracker, session id, tool capabilities).

        Returns:
            Dict with a single "context_addition" string summarizing the
            delegation or the failure.
        """
        task_description = args.get("task_description", "")
        tools_list = args.get("tools_list", [])
        outline_step_completion = args.get("outline_step_completion", False)

        # Generate unique delegation ID for this execution
        delegation_id = f"delegation_loop_{self.current_loop_count}"

        # Prepare shared state for LLMToolNode with enhanced result capture
        llm_tool_shared = {
            "current_task_description": task_description,
            "current_query": task_description,
            "formatted_context": {
                "recent_interaction": f"Reasoner delegating task: {task_description}",
                "session_summary": self._get_reasoning_summary(),
                "task_context": f"Loop {self.current_loop_count} delegation - CAPTURE ALL RESULTS"
            },
            "variable_manager": prep_res.get("variable_manager"),
            "agent_instance": prep_res.get("agent_instance"),
            "available_tools": tools_list,
            "tool_capabilities": prep_res.get("tool_capabilities", {}),
            "fast_llm_model": prep_res.get("fast_llm_model"),
            "complex_llm_model": prep_res.get("complex_llm_model"),
            "progress_tracker": prep_res.get("progress_tracker"),
            "session_id": prep_res.get("session_id"),
            "use_fast_response": True
        }

        try:
            # Execute LLMToolNode
            llm_tool_node = LLMToolNode()
            await llm_tool_node.run_async(llm_tool_shared)

            # IMMEDIATE RESULT EXTRACTION - Critical for visibility
            # NOTE(review): assumes LLMToolNode writes these keys into the shared
            # dict during run_async — confirm against the LLMToolNode implementation.
            final_response = llm_tool_shared.get("current_response", "No response captured")
            tool_calls_made = llm_tool_shared.get("tool_calls_made", 0)
            tool_results = llm_tool_shared.get("results", {})

            # GUARANTEED STORAGE - Multiple storage patterns for reliability
            delegation_result = {
                "task_description": task_description,
                "tools_used": tools_list,
                "tool_calls_made": tool_calls_made,
                "final_response": final_response,
                "results": tool_results,
                "timestamp": datetime.now().isoformat(),
                "delegation_id": delegation_id,
                "outline_step": self.current_outline_step,
                "reasoning_loop": self.current_loop_count,
                "success": True
            }

            # CRITICAL: Store immediately with multiple access patterns
            if self.variable_manager:
                # 1. Primary delegation storage
                self.variable_manager.set(f"delegation.loop_{self.current_loop_count}", delegation_result)

                # 2. Latest results quick access
                self.variable_manager.set("delegation.latest", delegation_result)

                # 3. Store individual tool results with direct access
                for result_id, result_data in tool_results.items():
                    self.variable_manager.set(f"results.{result_id}.data", result_data.get("data"))

                # 4. Create smart access keys for common patterns
                # (first string-typed "data" payload is presumed to be file content
                # when read_file was among the delegated tools)
                if "read_file" in tools_list and tool_results:
                    file_content = next((res.get("data") for res in tool_results.values()
                                         if res.get("data") and isinstance(res.get("data"), str)), None)
                    if file_content:
                        self.variable_manager.set("var.file_content", file_content)
                        self.variable_manager.set("latest_file_content", file_content)

                # 5. Update delegation index for discovery (capped at last 20 entries)
                index = self.variable_manager.get("delegation.index", [])
                index.append({
                    "loop": self.current_loop_count,
                    "task": task_description[:100],
                    "tools": tools_list,
                    "timestamp": datetime.now().isoformat(),
                    "results_available": len(tool_results) > 0
                })
                self.variable_manager.set("delegation.index", index[-20:])

            # Create comprehensive context addition with IMMEDIATE VISIBILITY
            context_addition = f"""DELEGATION COMPLETED (Loop {self.current_loop_count}):
Task: {task_description}
Tools: {', '.join(tools_list)}
Calls Made: {tool_calls_made}
Results Captured: {len(tool_results)} items

FINAL RESULT: {final_response}

- reference variable: delegation.loop_{self.current_loop_count}
DELEGATION END
"""

            # Mark outline step completion if specified
            if outline_step_completion:
                await self._mark_step_completion(prep_res, "delegation_complete", context_addition)

            return {"context_addition": context_addition}

        except Exception as e:
            error_msg = f"❌ DELEGATION FAILED: {str(e)}"
            # Store error for debugging
            if self.variable_manager:
                error_data = {
                    "task": task_description,
                    "error": str(e),
                    "timestamp": datetime.now().isoformat(),
                    "loop": self.current_loop_count
                }
                self.variable_manager.set(f"delegation.error.loop_{self.current_loop_count}", error_data)

            return {"context_addition": error_msg}

    async def _execute_enhanced_create_plan(self, args: dict, prep_res: dict) -> dict[str, Any]:
        """Enhanced plan creation with outline step completion tracking"""
        # Check if this completes the outline step
        outline_step_completion = args.get("outline_step_completion", False)

        # Execute standard plan creation
        result = await self._execute_create_plan(args, prep_res)

        # Enhanced with step completion tracking
        if outline_step_completion and result:
            await self._mark_step_completion(prep_res, "create_and_execute_plan", result["context_addition"])
            result["context_addition"] += f"\n✓ OUTLINE STEP {self.current_outline_step + 1} COMPLETED"

        return result

    async def _execute_advance_outline_step(self, args: dict, prep_res: dict) -> dict[str, Any]:
        """Execute outline step advancement"""
        step_completed = args.get("step_completed", False)
        completion_evidence = args.get("completion_evidence", "")
        next_step_focus = args.get("next_step_focus", "")

        if not self.outline or not self.outline.get("steps"):
            return {"context_addition": "Cannot advance: No outline available"}

        steps = self.outline["steps"]

        if self.current_outline_step >= len(steps):
            return {"context_addition": "Cannot advance: Already at final step"}

        if step_completed:
            # Mark current step as completed
            if self.current_outline_step < len(steps):
                current_step = steps[self.current_outline_step]
                current_step["status"] = "completed"
                current_step["completion_evidence"] = completion_evidence
                current_step["completed_at"] = datetime.now().isoformat()

            # Advance to next step
            self.current_outline_step += 1

            # Store advancement in variables
            if self.variable_manager:
                advancement_data = {
                    "step_completed": self.current_outline_step,
                    "completion_evidence": completion_evidence,
                    "next_step_focus": next_step_focus,
                    "timestamp": datetime.now().isoformat()
                }
                self.variable_manager.set(f"reasoning.step_completions.{self.current_outline_step - 1}",
                                          advancement_data)

            context_addition = f"""✓ STEP {self.current_outline_step} COMPLETED
Evidence: {completion_evidence}
Advanced to Step {self.current_outline_step + 1}/{len(steps)}"""

            if next_step_focus:
                context_addition += f"\nNext Step Focus: {next_step_focus}"

            if self.current_outline_step >= len(steps):
                context_addition += "\n🎯 ALL OUTLINE STEPS COMPLETED - Ready for direct_response"

        else:
            context_addition = f"Step {self.current_outline_step + 1} not yet completed - continue working on current step"

        return {"context_addition": context_addition}

    async def _execute_read_from_variables(self, args: dict) -> dict[str, Any]:
        """Enhanced variable reading with intelligent discovery and loop prevention"""
        if not self.variable_manager:
            return {"context_addition": "❌ Variable system not available"}

        scope = args.get("scope", args.get("query", "reasoning"))
        key = args.get("key", "")
        purpose = args.get("purpose", "")

        # CRITICAL: Check for repeated reads - prevent infinite loops
        read_signature = f"{scope}.{key}"
        if not hasattr(self, '_variable_read_history'):
            self._variable_read_history = []

        # Prevent reading same variable multiple times in short succession
        recent_reads = [r for r in self._variable_read_history if r['signature'] == read_signature]
        if len(recent_reads) >= 2:
            self._variable_read_history.append({
                'signature': read_signature,
                'timestamp': time.time(),
                'loop': self.current_loop_count
            })
            return {
                "context_addition": f"⚠️ LOOP PREVENTION: Already read {read_signature} {len(recent_reads)} times. Try different approach or advance to next task."
            }

        # Record this read attempt
        self._variable_read_history.append({
            'signature': read_signature,
            'timestamp': time.time(),
            'loop': self.current_loop_count
        })

        # Clean old read history (keep last 10)
        if len(self._variable_read_history) > 10:
            self._variable_read_history = self._variable_read_history[-10:]

        if not key:
            return {"context_addition": "❌ Cannot read: No key provided"}

        try:
            # Smart key resolution for common patterns
            resolved_key = self._resolve_smart_key(scope, key)

            # Try direct access first
            value = self.variable_manager.get(resolved_key)

            if value is not None:
                # Format value for display
                value_display = self._format_variable_value(value)

                context_addition = f"""{resolved_key}={value_display}
Access: Successfully retrieved from variable system"""

                return {"context_addition": context_addition}

            else:
                # Enhanced discovery when not found
                discovery_result = self._perform_smart_variable_discovery(scope, key, purpose)
                return {"context_addition": discovery_result}

        except Exception as e:
            return {"context_addition": f"❌ Variable read error: {str(e)}"}

    def _resolve_smart_key(self, scope: str, key: str) -> str:
        """Resolve smart key patterns for common access cases"""
        # Handle delegation results specially
        if scope == "delegation" and "loop_" in key:
            return f"delegation.{key}"
        elif scope == "results" and key.endswith(".data"):
            return f"results.{key}"
        elif scope == "var" or key.startswith("var."):
            return key if key.startswith("var.") else f"var.{key}"
        else:
            return f"{scope}.{key}" if scope != "reasoning" else f"reasoning.{key}"

    def _format_variable_value(self, value: any) -> str:
        """Format variable value for display with intelligent truncation"""
        if isinstance(value, dict | list):
            value_str = json.dumps(value, default=str, indent=2)
        else:
            value_str = str(value)

        # Smart truncation based on content type
        if len(value_str) > 200000:
            if isinstance(value, dict) and "results" in str(value):
                # For result dicts, show structure
                return f"RESULTS DICT ({len(value)} keys):\n" + value_str[:150000] + "\n... [TRUNCATED]"
            elif isinstance(value, str) and (value.startswith("# ") or "markdown" in value.lower()):
                # For file content, show beginning
                return f"FILE CONTENT ({len(value_str)} chars):\n" + value_str[:100000] + "\n... [FULL CONTENT AVAILABLE]"
            else:
                return value_str[:100000] + f"\n... [TRUNCATED - {len(value_str)} total chars]"

        return value_str

    def _perform_smart_variable_discovery(self, scope: str, key: str, purpose: str) -> str:
        """Perform intelligent variable discovery when key not found"""
        # Check latest delegation results first
        latest = self.variable_manager.get("delegation.latest")
        if latest:
            discovery_msg = f"❌ Variable not found: {scope}.{key}\n\n✨ LATEST DELEGATION RESULTS AVAILABLE:"
            discovery_msg += f"\nTask: {latest.get('task_description', 'Unknown')[:100]}"
            discovery_msg += f"\nResults: {len(latest.get('results', {}))} items available"
            discovery_msg += "\nAccess with: delegation.latest"

            # Show actual keys available
            if latest.get('results'):
                discovery_msg += "\n\n🔍 Available result keys:"
                for result_id in latest['results']:
                    discovery_msg += f"\n• results.{result_id}.data"

            return discovery_msg

        # Check delegation index for recent activity
        index = self.variable_manager.get("delegation.index", [])
        if index:
            recent = index[-3:]  # Last 3 delegations
            discovery_msg = f"❌ Variable not found: {scope}.{key}\n\n📚 RECENT DELEGATIONS:"
            for entry in recent:
                discovery_msg += f"\n• Loop {entry['loop']}: {entry['task'][:50]}..."
                discovery_msg += f"  Access: delegation.loop_{entry['loop']}"
            return discovery_msg

        # Fallback: show available scopes
        available_vars = self.variable_manager.get_available_variables()
        return f"❌ Variable not found: {scope}.{key}\n\n📋 Available scopes: {', '.join(available_vars.keys())}"


    async def _execute_write_to_variables(self, args: dict) -> dict[str, Any]:
        """Enhanced variable writing with automatic result storage"""
        if not self.variable_manager:
            return {"context_addition": "❌ Variable system not available"}

        scope = args.get("scope", "reasoning")
        key = args.get("key", "")
        value = args.get("value", "")
        description = args.get("description", "")

        if not key:
            return {"context_addition": "❌ Cannot write to variables: No key provided"}

        try:
            # Create scoped key
            full_key = f"{scope}.{key}" if scope != "reasoning" else f"reasoning.{key}"

            # Write to variables
            self.variable_manager.set(full_key, value)

            # Store enhanced metadata
            metadata = {
                "description": description,
                "written_at": datetime.now().isoformat(),
                "outline_step": getattr(self, 'current_outline_step', 0),
                "reasoning_loop": self.current_loop_count,
                "value_type": type(value).__name__,
                "value_size": len(str(value)) if value else 0,
                "auto_stored": False  # Manual storage
            }
            self.variable_manager.set(f"{full_key}_metadata", metadata)

            # Update storage index for easy discovery
            storage_index = self.variable_manager.get("reasoning.storage_index", [])
            storage_entry = {
                "key": full_key,
                "description": description,
                "timestamp": datetime.now().isoformat(),
                "loop": self.current_loop_count
            }
            storage_index.append(storage_entry)
            self.variable_manager.set("reasoning.storage_index", storage_index[-20:])  # Keep last 20

            context_addition = f"✅ Stored in variables: {full_key}"
            if description:
                context_addition += f"\n📄 Description: {description}"

            # Show how to access it
            context_addition += f"\n🔍 Access with: read_from_variables(scope=\"{scope}\", key=\"{key}\", purpose=\"...\")"

            return {"context_addition": context_addition}

        except Exception as e:
            return {"context_addition": f"❌ Failed to write to variables: {str(e)}"}

    def _auto_store_delegation_results(self, delegation_result: dict, task_description: str) -> str:
        """Automatically store delegation results with smart naming and comprehensive indexing"""
        if not self.variable_manager:
            return "\n❌ Variable system not available for auto-storage"

        storage_summary = []

        try:
            # Store main delegation result with loop reference
            main_key = f"delegation.loop_{self.current_loop_count}"
            self.variable_manager.set(main_key, delegation_result)
            storage_summary.append(f"• {main_key}")

            # Store individual tool results with smart naming
            results = delegation_result.get("results", {})
            smart_keys_created = []

            for result_id, result_data in results.items():
                # Smart naming based on task content and result type
                smart_key = self._generate_smart_key(task_description, result_id, result_data)

                # Store full result
                self.variable_manager.set(smart_key, result_data)
                storage_summary.append(f"• {smart_key}")
                smart_keys_created.append(smart_key)

                # Store data separately for direct access
                if result_data.get("data"):
                    data_key = f"{smart_key}.data"
                    self.variable_manager.set(data_key, result_data["data"])
                    storage_summary.append(f"• {data_key} (direct access)")

                    # Store with generic access pattern
                    generic_data_key = f"results.{result_id}.data"
                    self.variable_manager.set(generic_data_key, result_data["data"])
                    storage_summary.append(f"• {generic_data_key} (standard access)")

            # Update comprehensive quick access index
            quick_access = {
                "latest_delegation": main_key,
                "latest_task": task_description,
                "timestamp": datetime.now().isoformat(),
                "loop": self.current_loop_count,
                "outline_step": getattr(self, 'current_outline_step', 0),
                "stored_keys": [item.replace("• ", "") for item in storage_summary],
                "smart_keys": smart_keys_created,
                "access_patterns": {
                    "main_result": main_key,
                    "by_loop": f"delegation.loop_{self.current_loop_count}",
                    "latest": "reasoning.latest_results",
                    "data_direct": [key for key in storage_summary if ".data" in key]
                }
            }
            self.variable_manager.set("reasoning.latest_results", quick_access)

            # Update global delegation index for easy discovery
            delegation_index = self.variable_manager.get("delegation.index", [])
            index_entry = {
                "loop": self.current_loop_count,
                "task": task_description[:100] + ("..." if len(task_description) > 100 else ""),
                "keys_created": len(storage_summary),
                "timestamp": datetime.now().isoformat(),
                "main_key": main_key,
                "smart_keys": smart_keys_created
            }
            delegation_index.append(index_entry)
            self.variable_manager.set("delegation.index", delegation_index[-50:])  # Keep last 50

            # Store task-specific quick access
            task_hash = hash(task_description) % 10000
            self.variable_manager.set(f"delegation.by_task.{task_hash}", {
                "task_description": task_description,
                "results": quick_access,
                "created_at": datetime.now().isoformat()
            })

            return f"\n📊 Auto-stored results ({len(storage_summary)} entries):\n" + "\n".join(storage_summary[:8]) + (
                f"\n... +{len(storage_summary) - 8} more" if len(storage_summary) > 8 else "")

        except Exception as e:
            return f"\n❌ Auto-storage failed: {str(e)}"

    def _generate_smart_key(self, task_description: str, result_id: str, result_data: dict) -> str:
        """Generate intelligent storage keys based on task content and result type"""
        task_lower = task_description.lower()

        # Analyze task type
        if "read" in task_lower and "file" in task_lower:
            prefix = "file_content"
        elif "write" in task_lower and "file" in task_lower:
            prefix = "file_written"
        elif "create" in task_lower and "file" in task_lower:
            prefix = "file_created"
        elif "search" in task_lower or "find" in task_lower:
            prefix = "search_results"
        elif "analyze" in task_lower or "analysis" in task_lower:
            prefix = "analysis_results"
        elif "list" in task_lower or "directory" in task_lower:
            prefix = "directory_listing"
        elif "download" in task_lower or "fetch" in task_lower:
            prefix = "downloaded_content"
        else:
            # Analyze result data for hints
            result_str = str(result_data).lower()
            if "file" in result_str and "content" in result_str:
                prefix = "file_content"
            elif "search" in result_str or "results" in result_str:
                prefix = "search_results"
            elif "data" in result_str:
                prefix = "task_data"
            else:
                prefix = "task_result"

        # Create unique key with loop and result ID
        return f"{prefix}.loop_{self.current_loop_count}_{result_id}"

    async def _mark_step_completion(self, prep_res: dict, method: str, evidence: str):
        """Mark current outline step as completed"""
        if not self.outline or not self.outline.get("steps"):
            return

        steps = self.outline["steps"]
        if self.current_outline_step < len(steps):
            current_step = steps[self.current_outline_step]
            current_step["status"] = "completed"
            current_step["completion_method"] = method
            current_step["completion_evidence"] = evidence
            current_step["completed_at"] = datetime.now().isoformat()

            # Store in variables
            if self.variable_manager:
                completion_data = {
                    "step_number": self.current_outline_step,
                    "description": current_step.get("description", ""),
                    "method": method,
                    "evidence": evidence,
                    "timestamp": datetime.now().isoformat()
                }
                self.variable_manager.set(f"reasoning.step_completions.{self.current_outline_step}", completion_data)

    async def _store_successful_completion(self, prep_res: dict, final_answer: str):
        """Store successful completion data for future learning"""
        if not self.variable_manager:
            return

        success_data = {
            "query": prep_res["original_query"],
            "final_answer": final_answer,
            "reasoning_loops": self.current_loop_count,
            "outline": self.outline,
            "performance_metrics": self.performance_metrics,
            "auto_recovery_attempts": self.auto_recovery_attempts,
            "completion_timestamp": datetime.now().isoformat(),
            "session_id": prep_res.get("session_id", "default")
        }

        # Store in successful patterns
        successes = self.variable_manager.get("reasoning.successful_patterns", [])
        successes.append(success_data)
        self.variable_manager.set("reasoning.successful_patterns", successes[-20:])  # Keep last 20

        # Update performance statistics
        self._update_success_statistics()

    def _update_success_statistics(self):
        """Update success statistics in variables"""
        if not self.variable_manager:
            return

        # Get current stats
        current_stats = self.variable_manager.get("reasoning.performance.statistics", {})

        # Update stats
        current_stats["total_successful_sessions"] = current_stats.get("total_successful_sessions", 0) + 1
        current_stats["avg_loops_per_success"] = current_stats.get("avg_loops_per_success", 0)

        # Calculate new average
        total_sessions = current_stats["total_successful_sessions"]
        old_avg = current_stats["avg_loops_per_success"] * (total_sessions - 1)
        current_stats["avg_loops_per_success"] = (old_avg + self.current_loop_count) / total_sessions

        # Store updated stats
        self.variable_manager.set("reasoning.performance.statistics", current_stats)

    async def _create_outline_completion_response(self, prep_res: dict) -> str:
        """Create response when outline is completed"""
        if not self.outline:
            return "Outline completion response requested but no outline available"

        steps = self.outline.get("steps", [])
        completed_steps = [s for s in steps if
                           s.get("status") in ["completed", "force_completed", "emergency_completed"]]

        response_parts = []
        response_parts.append("I have completed the structured approach outlined for your request:")

        # Summarize completed steps
        for i, step in enumerate(completed_steps):
            status_indicator = "✓" if step.get("status") == "completed" else "⚠️"
            response_parts.append(f"{status_indicator} Step {i + 1}: {step.get('description', 'Unknown step')}")

            # Add evidence if available
            evidence = step.get("completion_evidence", "")
            if evidence and len(evidence) < 200:
                response_parts.append(f"   Result: {evidence}")

        # Get final results from variables if available
        if self.variable_manager:
            final_results = self.variable_manager.get("reasoning.final_results", {})
            if final_results:
                response_parts.append("\nKey findings:")
                for key, value in final_results.items():
                    if isinstance(value, str) and len(value) < 300:
                        response_parts.append(f"- {key}: {value}")

        response_parts.append(
            f"\nCompleted in {self.current_loop_count} reasoning cycles using outline-driven execution.")

        return "\n".join(response_parts)

    async def _create_enhanced_timeout_response(self, query: str, prep_res: dict) -> str:
        """Create enhanced timeout response with comprehensive progress summary"""
        response_parts = []
        response_parts.append(
            f"I reached my reasoning limit of {self.max_reasoning_loops} steps while working on: {query}")

        # Outline progress
        if self.outline:
            steps = self.outline.get("steps", [])
            completed_steps = [s for s in steps if
                               s.get("status") in ["completed", "force_completed", "emergency_completed"]]
            unfinished_steps = [s for s in steps if s not in completed_steps]

            response_parts.append(f"\nOutline Progress: {len(completed_steps)}/{len(steps)} steps completed")

            if completed_steps:
                response_parts.append("Completed steps:")
                for i, step in enumerate(completed_steps):
                    response_parts.append(f"✓ {step.get('description', f'Step {i + 1}')}")

            if unfinished_steps:
                response_parts.append("Unfinished steps:")
                for i, step in enumerate(unfinished_steps):
                    response_parts.append(f"✗ {step.get('description', f'Step {i + 1}')}")

        # Task stack progress
        if self.internal_task_stack:
            completed_tasks = [t for t in self.internal_task_stack if t.get("status") == "completed"]
            pending_tasks = [t for t in self.internal_task_stack if t.get("status") == "pending"]

            response_parts.append(f"\nTask Progress: {len(completed_tasks)} completed, {len(pending_tasks)} pending")

        # Performance metrics
        if self.performance_metrics:
            response_parts.append(
                f"\nPerformance: {self.performance_metrics.get('action_efficiency', 0):.1%} efficiency, {self.auto_recovery_attempts} recovery attempts")

        # Available results from variables
        if self.variable_manager:
            reasoning_results = self.variable_manager.get("reasoning", {})
            if reasoning_results:
                response_parts.append(f"\nStored findings: {len(reasoning_results)} entries in reasoning variables")

        return "\n".join(response_parts)

    async def _finalize_reasoning_session(self, prep_res: dict, final_result: str):
        """Finalize reasoning session with comprehensive data storage"""
        if not self.variable_manager:
            return

        # Store session completion data
        session_data = {
            "query": prep_res["original_query"],
            "final_result": final_result,
            "reasoning_loops": self.current_loop_count,
            "outline_completion": self.current_outline_step,
            "performance_metrics": self.performance_metrics,
            "auto_recovery_attempts": self.auto_recovery_attempts,
            "context_summaries": len([c for c in self.reasoning_context if c.get("type") == "context_summary"]),
            "completion_timestamp": datetime.now().isoformat(),
            "session_duration": time.time() - time.mktime(datetime.now().timetuple()),
            "success": True
        }

        # Store in session history
        session_history = self.variable_manager.get("reasoning.session_history", [])
        session_history.append(session_data)
        self.variable_manager.set("reasoning.session_history", session_history[-50:])  # Keep last 50 sessions

        # Store outline pattern for reuse
        if self.outline:
            outline_pattern = {
                "query_type": self._classify_query_type(prep_res["original_query"]),
                "outline": self.outline,
                "success": True,
                "loops_used": self.current_loop_count,
                "timestamp": datetime.now().isoformat()
            }
            patterns = self.variable_manager.get("reasoning.successful_patterns.outlines", [])
            patterns.append(outline_pattern)
            self.variable_manager.set("reasoning.successful_patterns.outlines", patterns[-10:])

    def _classify_query_type(self, query: str) -> str:
        """Classify query type for pattern matching"""
        query_lower = query.lower()

        if any(word in query_lower for word in ["search", "find", "look up", "research"]):
            return "research"
        elif any(word in query_lower for word in ["analyze", "compare", "evaluate"]):
            return "analysis"
        elif any(word in query_lower for word in ["create", "generate", "write", "build"]):
            return "creation"
        elif any(word in query_lower for word in ["plan", "strategy", "approach"]):
            return "planning"
        else:
            return "general"

    async def _handle_reasoning_error(self, error: Exception, prep_res: dict, progress_tracker):
        """Record a loop failure in context and variables, then attempt auto-recovery."""
        eprint(f"Reasoning loop {self.current_loop_count} failed: {error}")

        # Make the failure visible to subsequent reasoning loops.
        self.reasoning_context.append({
            "type": "error",
            "content": f"Error in loop {self.current_loop_count}: {str(error)}",
            "error_type": type(error).__name__,
            "outline_step": self.current_outline_step,
            "timestamp": datetime.now().isoformat()
        })

        # Persist a bounded error log (last 100 entries) for cross-session learning.
        if self.variable_manager:
            log_entry = {
                "error": str(error),
                "error_type": type(error).__name__,
                "loop": self.current_loop_count,
                "outline_step": self.current_outline_step,
                "timestamp": datetime.now().isoformat(),
                "query": prep_res["original_query"]
            }
            error_log = self.variable_manager.get("reasoning.error_log", [])
            error_log.append(log_entry)
            self.variable_manager.set("reasoning.error_log", error_log[-100:])

        # Attempt recovery while the attempt budget allows.
        if self.auto_recovery_attempts < self.max_auto_recovery:
            await self._trigger_auto_recovery(prep_res)

    # Existing helper methods (_execute_internal_reasoning, etc.) follow below,
    # updated to use the enhanced variable system.

    async def post_async(self, shared, prep_res, exec_res):
        """Publish reasoning artifacts, stats, and the final answer to shared state.

        Always routes to the "reasoner_complete" action.
        """
        final_result = exec_res.get("final_result", "Task processing incomplete")

        # Expose the full reasoning trace for downstream consumers/debugging.
        shared["reasoning_artifacts"] = {
            "reasoning_loops": exec_res.get("reasoning_loops", 0),
            "reasoning_context": exec_res.get("reasoning_context", []),
            "internal_task_stack": exec_res.get("internal_task_stack", []),
            "outline": exec_res.get("outline"),
            "outline_completion": exec_res.get("outline_completion", 0),
            "performance_metrics": exec_res.get("performance_metrics", {}),
            "auto_recovery_attempts": exec_res.get("auto_recovery_attempts", 0)
        }

        # Persist final session data and refresh global stats when variables exist.
        if self.variable_manager:
            self.variable_manager.set("reasoning.current_session.final_data", {
                "final_result": final_result,
                "completion_timestamp": datetime.now().isoformat(),
                "total_loops": exec_res.get("reasoning_loops", 0),
                "session_success": final_result != "Task processing incomplete",
                "outline_driven_execution": True
            })
            self._update_global_performance_stats(exec_res)

        # Primary response channels.
        shared["llm_reasoner_result"] = final_result
        shared["current_response"] = final_result

        # Synthesis metadata consumed by the response pipeline.
        shared["synthesized_response"] = {
            "synthesized_response": final_result,
            "confidence": self._calculate_confidence(exec_res),
            "metadata": {
                "synthesis_method": "outline_driven_reasoner",
                "reasoning_loops": exec_res.get("reasoning_loops", 0),
                "outline_completion": exec_res.get("outline_completion", 0),
                "performance_score": self._calculate_performance_score(exec_res),
                "auto_recovery_used": exec_res.get("auto_recovery_attempts", 0) > 0
            }
        }

        return "reasoner_complete"

    def _update_global_performance_stats(self, exec_res: dict):
        """Update global performance statistics in variables"""
        if not self.variable_manager:
            return

        stats = self.variable_manager.get("reasoning.global_performance", {})

        # Update counters
        stats["total_sessions"] = stats.get("total_sessions", 0) + 1
        stats["total_loops"] = stats.get("total_loops", 0) + exec_res.get("reasoning_loops", 0)
        stats["total_recoveries"] = stats.get("total_recoveries", 0) + exec_res.get("auto_recovery_attempts", 0)

        # Calculate averages
        stats["avg_loops_per_session"] = stats["total_loops"] / stats["total_sessions"]
        stats["recovery_rate"] = stats["total_recoveries"] / stats["total_sessions"]

        # Success tracking
        if exec_res.get("final_result") != "Task processing incomplete":
            stats["successful_sessions"] = stats.get("successful_sessions", 0) + 1
            stats["success_rate"] = stats["successful_sessions"] / stats["total_sessions"]

        self.variable_manager.set("reasoning.global_performance", stats)

    def _calculate_confidence(self, exec_res: dict) -> float:
        """Calculate confidence score based on execution results"""
        base_confidence = 0.5

        # Outline completion boosts confidence
        outline = exec_res.get("outline")
        if outline:
            completion_ratio = exec_res.get("outline_completion", 0) / len(outline.get("steps", [1]))
            base_confidence += 0.3 * completion_ratio

        # Low recovery attempts boost confidence
        recovery_attempts = exec_res.get("auto_recovery_attempts", 0)
        if recovery_attempts == 0:
            base_confidence += 0.15
        elif recovery_attempts == 1:
            base_confidence += 0.05

        # Reasonable loop count boosts confidence
        loops = exec_res.get("reasoning_loops", 0)
        if 3 <= loops <= 15:
            base_confidence += 0.1

        # Performance metrics
        performance = exec_res.get("performance_metrics", {})
        if performance.get("action_efficiency", 0) > 0.7:
            base_confidence += 0.1

        return min(1.0, max(0.0, base_confidence))

    def _calculate_performance_score(self, exec_res: dict) -> float:
        """Calculate overall performance score"""
        score = 0.5

        # Efficiency score
        performance = exec_res.get("performance_metrics", {})
        action_efficiency = performance.get("action_efficiency", 0)
        score += 0.3 * action_efficiency

        # Completion score
        outline = exec_res.get("outline")
        if outline:
            completion_ratio = exec_res.get("outline_completion", 0) / len(outline.get("steps", [1]))
            score += 0.4 * completion_ratio

        # Recovery penalty
        recovery_attempts = exec_res.get("auto_recovery_attempts", 0)
        score -= 0.1 * recovery_attempts

        return min(1.0, max(0.0, score))


    def _summarize_reasoning_context(self) -> str:
        """Summarize the current reasoning context"""
        if not self.reasoning_context:
            return "No previous reasoning steps"

        summary_parts = []
        for entry in self.reasoning_context[-5:]:  # Last 5 entries
            entry_type = entry.get("type", "unknown")
            content = entry.get("content", "")

            if entry_type == "reasoning":
                # Truncate long reasoning content
                content_preview = content[:20000] + "..." if len(content) > 20000 else content
                summary_parts.append(f"Loop {entry.get('loop', '?')}: {content_preview}")
            elif entry_type == "meta_tool_result":
                summary_parts.append(f"Result: {content[:150]}...")
            elif entry_type == "error":
                summary_parts.append(f"Error: {content}")

        return "\n".join(summary_parts)

    def _summarize_task_stack(self) -> str:
        """Summarize the internal task stack"""
        if not self.internal_task_stack:
            return "No tasks in stack"

        summary_parts = []
        for i, task in enumerate(self.internal_task_stack):
            status = task.get("status", "pending")
            description = task.get("description", "No description")
            summary_parts.append(f"{i + 1}. [{status.upper()}] {description}")

        return "\n".join(summary_parts)

    def _get_tool_category(self, tool_name: str) -> str:
        """Get category for meta-tool"""
        categories = {
            "internal_reasoning": "thinking",
            "manage_internal_task_stack": "planning",
            "delegate_to_llm_tool_node": "delegation",
            "create_and_execute_plan": "orchestration",
            "direct_response": "completion"
        }
        return categories.get(tool_name, "unknown")

    def _calculate_reasoning_depth(self) -> int:
        """Calculate current reasoning depth"""
        reasoning_entries = [entry for entry in self.reasoning_context if entry.get("type") == "reasoning"]
        return len(reasoning_entries)

    def _assess_delegation_complexity(self, args: dict) -> str:
        """Assess complexity of delegation task"""
        task_desc = args.get("task_description", "")
        tools_count = len(args.get("tools_list", []))

        if tools_count > 3 or len(task_desc) > 100:
            return "high"
        elif tools_count > 1 or len(task_desc) > 50:
            return "medium"
        else:
            return "low"

    def _estimate_plan_complexity(self, goals: list) -> str:
        """Estimate complexity of plan"""
        goal_count = len(goals)
        total_text = sum(len(str(goal)) for goal in goals)

        if goal_count > 5 or total_text > 500:
            return "high"
        elif goal_count > 2 or total_text > 200:
            return "medium"
        else:
            return "low"

    def _calculate_tool_performance_score(self, duration: float, tool_name: str) -> float:
        """Calculate performance score for tool execution"""
        # Expected durations by tool type
        expected_durations = {
            "internal_reasoning": 0.1,
            "manage_internal_task_stack": 0.05,
            "delegate_to_llm_tool_node": 3.0,
            "create_and_execute_plan": 10.0,
            "direct_response": 0.1
        }

        expected = expected_durations.get(tool_name, 1.0)
        if duration <= expected:
            return 1.0
        else:
            return max(0.0, expected / duration)

    def _create_reasoning_summary(self) -> str:
        """Create summary of reasoning process"""
        reasoning_entries = [entry for entry in self.reasoning_context if entry.get("type") == "reasoning"]
        task_entries = len(self.internal_task_stack)

        return f"Completed {len(reasoning_entries)} reasoning steps with {task_entries} tasks tracked"

    def _calculate_batch_performance(self, matches: list) -> dict[str, Any]:
        """Calculate performance metrics for batch execution"""
        tool_types = [match[0] for match in matches]
        return {
            "total_tools": len(matches),
            "tool_diversity": len(set(tool_types)),
            "most_used_tool": max(set(tool_types), key=tool_types.count) if tool_types else "none"
        }

    def _assess_reasoning_progress(self) -> str:
        """Assess overall reasoning progress"""
        if len(self.reasoning_context) < 3:
            return "early_stage"
        elif len(self.reasoning_context) < 8:
            return "developing"
        elif len(self.reasoning_context) < 15:
            return "mature"
        else:
            return "extensive"

    def _get_error_context(self, error: Exception) -> dict[str, Any]:
        """Get contextual information about an error"""
        return {
            "error_class": type(error).__name__,
            "reasoning_stage": f"loop_{self.current_loop_count}",
            "context_available": len(self.reasoning_context) > 0,
            "stack_state": "populated" if self.internal_task_stack else "empty"
        }

    async def _execute_internal_reasoning(self, args: dict, prep_res: dict) -> dict[str, Any]:
        """Execute internal reasoning meta-tool"""
        thought = args.get("thought", "")
        thought_number = args.get("thought_number", 1)
        total_thoughts = args.get("total_thoughts", 1)
        next_thought_needed = args.get("next_thought_needed", False)
        current_focus = args.get("current_focus", "")
        key_insights = args.get("key_insights", [])
        potential_issues = args.get("potential_issues", [])
        confidence_level = args.get("confidence_level", 0.5)

        # Structure the reasoning entry
        reasoning_entry = {
            "thought": thought,
            "thought_number": thought_number,
            "total_thoughts": total_thoughts,
            "next_thought_needed": next_thought_needed,
            "current_focus": current_focus,
            "key_insights": key_insights,
            "potential_issues": potential_issues,
            "confidence_level": confidence_level,
            "timestamp": datetime.now().isoformat()
        }

        # Add to internal reasoning log
        if not hasattr(self, 'internal_reasoning_log'):
            self.internal_reasoning_log = []
        self.internal_reasoning_log.append(reasoning_entry)

        # Format for context
        context_addition = f"""Internal Reasoning Step {thought_number}/{total_thoughts}:
Thought: {thought}
Focus: {current_focus}
Key Insights: {', '.join(key_insights) if key_insights else 'None'}
Potential Issues: {', '.join(potential_issues) if potential_issues else 'None'}
Confidence: {confidence_level}
Next Thought Needed: {next_thought_needed}"""

        return {"context_addition": context_addition}

    async def _execute_manage_task_stack(self, args: dict, prep_res: dict) -> dict[str, Any]:
        """Execute task stack management meta-tool"""
        action = args.get("action", "get_current").lower()
        task_description = args.get("task_description", "")

        if action == "add":
            self.internal_task_stack.append({
                "description": task_description,
                "status": "pending",
                "added_at": datetime.now().isoformat()
            })
            context_addition = f"Added to task stack: {task_description}"

        elif action == "remove":
            # Remove task by description match
            original_count = len(self.internal_task_stack)
            self.internal_task_stack = [
                task for task in self.internal_task_stack
                if task_description.lower() not in task["description"].lower()
            ]
            removed_count = original_count - len(self.internal_task_stack)
            context_addition = f"Removed {removed_count} task(s) matching: {task_description}"

        elif action == "complete":
            # Mark task as completed
            for task in self.internal_task_stack:
                if task_description.lower() in task["description"].lower():
                    task["status"] = "completed"
                    task["completed_at"] = datetime.now().isoformat()
            context_addition = f"Marked as completed: {task_description}"

        elif action == "get_current":
            if self.internal_task_stack:
                stack_summary = []
                for i, task in enumerate(self.internal_task_stack):
                    status = task["status"]
                    desc = task["description"]
                    stack_summary.append(f"{i + 1}. [{status.upper()}] {desc}")
                context_addition = "Current task stack:\n" + "\n".join(stack_summary)
            else:
                context_addition = "Task stack is empty"

        else:
            context_addition = f"Unknown task stack action: {action}"

        return {"context_addition": context_addition}

    async def _execute_delegate_llm_tool(self, args: dict, prep_res: dict) -> dict[str, Any]:
        """Execute delegation to LLMToolNode"""
        task_description = args.get("task_description", "")
        tools_list = args.get("tools_list", [])

        # Prepare shared state for LLMToolNode
        llm_tool_shared = {
            "current_task_description": task_description + '\nreturn all results in the final answer!',
            "current_query": task_description,
            "formatted_context": {
                "recent_interaction": f"Reasoner delegating task: {task_description}",
                "session_summary": self._get_reasoning_summary(),
                "task_context": f"Reasoning loop {self.current_loop_count}, delegated task. return all results!"
            },
            "variable_manager": prep_res.get("variable_manager"),
            "agent_instance": prep_res.get("agent_instance"),
            "available_tools": tools_list,  # Restrict to specific tools
            "tool_capabilities": prep_res.get("tool_capabilities", {}),
            "fast_llm_model": prep_res.get("fast_llm_model"),
            "complex_llm_model": prep_res.get("complex_llm_model"),
            "progress_tracker": prep_res.get("progress_tracker"),
            "session_id": prep_res.get("session_id"),
            "use_fast_response": True  # Use fast model for delegated tasks
        }

        # Execute LLMToolNode
        try:
            llm_tool_node = LLMToolNode()
            await llm_tool_node.run_async(llm_tool_shared)

            # Get results
            final_response = llm_tool_shared.get("current_response", "Task completed without specific result")
            tool_calls_made = llm_tool_shared.get("tool_calls_made", 0)

            context_addition = f"""Delegated Task Completed:
Task: {task_description}
Tools Available: {', '.join(tools_list)}
Tools Used: {tool_calls_made} tool calls made
Result: {final_response}"""

            return {"context_addition": context_addition}

        except Exception as e:
            context_addition = f"Delegation failed for task '{task_description}': {str(e)}"
            return {"context_addition": context_addition}

    async def _execute_create_plan(self, args: dict, prep_res: dict) -> dict[str, Any]:
        """Create a task plan via TaskPlannerNode and drive it with TaskExecutorNode.

        Args:
            args: Meta-tool arguments; expects "goals" (list of goal strings).
            prep_res: Prepared shared state; copied as the planner/executor context.

        Returns:
            dict with a "context_addition" string summarizing the run (or failure).
        """
        goals = args.get("goals", [])

        if not goals:
            return {"context_addition": "No goals provided for plan creation"}

        try:
            # Prepare shared state for TaskPlanner
            planning_shared = prep_res.copy()
            planning_shared.update({
                "replan_context": {
                    "goals": goals,
                    "triggered_by": "llm_reasoner",
                    "reasoning_context": self._get_reasoning_summary()
                },
                "current_task_description": f"Execute plan with {len(goals)} goals",
                "current_query": f"Complex task: {'; '.join(goals)}"
            })

            # Execute TaskPlanner
            planner_node = TaskPlannerNode()
            plan_info = await planner_node.run_async(planning_shared)

            if plan_info == "planning_failed":
                return {"context_addition": f"Plan creation failed: {planning_shared.get('planning_error', 'Unknown error')}"}

            # NOTE(review): current_plan may be absent (None) even when planning did
            # not report failure; `plan.tasks` below would then raise and fall into
            # the broad except — confirm TaskPlannerNode always sets it on success.
            plan = planning_shared.get("current_plan")
            # Execute the plan using TaskExecutor
            executor_shared = planning_shared.copy()
            executor_node = TaskExecutorNode()

            # Execute plan to completion — cycle cap keeps a stuck executor from
            # spinning forever.
            max_execution_cycles = 10
            execution_cycle = 0

            while execution_cycle < max_execution_cycles:
                execution_cycle += 1

                result = await executor_node.run_async(executor_shared)

                # Check completion status
                if result == "plan_completed" or result == "execution_error":
                    break
                elif result in ["continue_execution", "waiting"]:
                    continue
                else:
                    # Handle other results like reflection needs
                    if result in ["needs_dynamic_replan", "needs_plan_append"]:
                        # For now, just continue - could add reflection logic here
                        continue
                    break

            # Collect results — task status is read from the executor's "tasks"
            # mapping, keyed by task id.
            completed_tasks = [
                task for task in plan.tasks
                if executor_shared["tasks"][task.id].status == "completed"
            ]

            failed_tasks = [
                task for task in plan.tasks
                if executor_shared["tasks"][task.id].status == "failed"
            ]

            # Build context addition with results
            results_summary = []
            results_store = executor_shared.get("results", {})

            for task in completed_tasks:
                task_result = results_store.get(task.id, {})
                if task_result.get("data"):
                    # Preview is clipped to 150 chars (ellipsis is always appended).
                    result_preview = str(task_result["data"])[:150] + "..."
                    results_summary.append(f"- {task.description}: {result_preview}")

            context_addition = f"""Plan Execution Completed:
Goals: {len(goals)} goals processed
Tasks Created: {len(plan.tasks)}
Tasks Completed: {len(completed_tasks)}
Tasks Failed: {len(failed_tasks)}
Execution Cycles: {execution_cycle}

Results Summary:
{chr(10).join(results_summary) if results_summary else 'No specific results captured'}"""

            return {"context_addition": context_addition}

        except Exception as e:
            # Broad catch: any planner/executor failure is reported back into the
            # reasoning context instead of aborting the reasoner loop.
            import traceback
            print(traceback.format_exc())
            context_addition = f"Plan execution failed: {str(e)}"
            return {"context_addition": context_addition}

    def _get_reasoning_summary(self) -> str:
        """Get a summary of the reasoning process so far"""
        if not self.reasoning_context:
            return "No reasoning context available"

        summary_parts = []
        reasoning_entries = [entry for entry in self.reasoning_context if entry.get("type") == "reasoning"]

        for entry in reasoning_entries[-3:]:  # Last 3 reasoning steps
            content = entry.get("content", "")[:50000] + "..."
            loop_num = entry.get("loop", "?")
            summary_parts.append(f"Loop {loop_num}: {content}")

        return "\n".join(summary_parts)

    async def _create_error_response(self, query: str, error: str) -> str:
        """Create an error response"""
        return f"I encountered an error while processing your request: {error}. I was working on: {query}"

    async def _fallback_direct_response(self, prep_res: dict) -> dict[str, Any]:
        """Fallback when LLM is not available"""
        query = prep_res["original_query"]
        fallback_response = f"I received your request: {query}. However, I'm currently unable to process complex requests due to limited capabilities."

        return {
            "final_result": fallback_response,
            "reasoning_loops": 0,
            "reasoning_context": [{"type": "fallback", "content": "LLM unavailable"}],
            "internal_task_stack": []
        }
exec_async(prep_res) async

Enhanced main reasoning loop with outline-driven execution

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
4083
4084
4085
4086
4087
4088
4089
4090
4091
4092
4093
4094
4095
4096
4097
4098
4099
4100
4101
4102
4103
4104
4105
4106
4107
4108
4109
4110
4111
4112
4113
4114
4115
4116
4117
4118
4119
4120
4121
4122
4123
4124
4125
4126
4127
4128
4129
4130
4131
4132
4133
4134
4135
4136
4137
4138
4139
4140
4141
4142
4143
4144
4145
4146
4147
4148
4149
4150
4151
4152
4153
4154
4155
4156
4157
4158
4159
4160
4161
4162
4163
4164
4165
4166
4167
4168
4169
4170
4171
4172
4173
4174
4175
4176
4177
4178
4179
4180
4181
4182
4183
4184
4185
4186
4187
4188
4189
4190
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200
4201
4202
4203
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221
4222
4223
4224
4225
4226
4227
4228
4229
4230
4231
4232
4233
async def exec_async(self, prep_res):
    """Enhanced main reasoning loop with outline-driven execution.

    Runs the reasoner until a final result is produced, the outline/loop
    budget is exhausted, or auto-recovery gives up.  Each iteration builds
    an outline-driven prompt, calls the LLM, executes any meta-tool calls
    it emitted, and tracks progress.

    Args:
        prep_res: Payload from ``prep_async`` (query, agent instance,
            models, progress tracker, sub-system references).

    Returns:
        dict with "final_result" plus reasoning artifacts: context, task
        stack, outline, outline progress, performance metrics, and the
        number of auto-recovery attempts.
    """
    # No LLM backend available — answer with a canned fallback payload.
    if not LITELLM_AVAILABLE:
        return await self._fallback_direct_response(prep_res)

    original_query = prep_res["original_query"]
    agent_instance = prep_res["agent_instance"]
    progress_tracker = prep_res.get("progress_tracker")

    # Initialize enhanced reasoning context
    await self._initialize_reasoning_session(prep_res, original_query)

    # STEP 1: MANDATORY OUTLINE CREATION — every run must start from an
    # explicit outline; abort early when one cannot be created.
    if not self.outline:
        outline_result = await self._create_initial_outline(prep_res)
        if not outline_result:
            return await self._create_error_response(original_query, "Failed to create initial outline")

    final_result = None
    consecutive_no_progress = 0
    max_no_progress = 3

    # Enhanced main reasoning loop with strict progress tracking.
    # NOTE(review): the guard uses current_reasoning_count (incremented only
    # on no-action loops, decremented on action loops — so it can go
    # negative), not current_loop_count; total iterations can exceed
    # max_reasoning_loops — confirm this is intended.
    while self.current_reasoning_count < self.max_reasoning_loops:
        self.current_loop_count += 1
        loop_start_time = time.time()

        # Check for infinite loops (repeated action signatures etc.)
        if self._detect_infinite_loop():
            await self._trigger_auto_recovery(prep_res)
            if self.auto_recovery_attempts >= self.max_auto_recovery:
                break

        # Auto-context management (trims reasoning_context when too large)
        await self._manage_context_size()

        # Progress tracking: emit one event per loop with full bookkeeping.
        if progress_tracker:
            await progress_tracker.emit_event(ProgressEvent(
                event_type="reasoning_loop",
                timestamp=time.time(),
                node_name="LLMReasonerNode",
                status=NodeStatus.RUNNING,
                metadata={
                    "loop_number": self.current_loop_count,
                    "outline_step": self.current_outline_step,
                    "outline_total": len(self.outline.get("steps", [])) if self.outline else 0,
                    "context_size": len(self.reasoning_context),
                    "task_stack_size": len(self.internal_task_stack),
                    "auto_recovery_attempts": self.auto_recovery_attempts,
                    "performance_metrics": self.performance_metrics
                }
            ))

        try:
            # Build enhanced reasoning prompt with outline context
            reasoning_prompt = await self._build_outline_driven_prompt(prep_res)

            # Force progress check: after two stalled loops, demand action.
            if self.mandatory_progress_check and consecutive_no_progress >= 2:
                reasoning_prompt += "\n\n**MANDATORY**: You must either complete current outline step or move to next step. No more analysis without action!"

            # LLM reasoning call — always uses the complex model tier.
            model_to_use = prep_res.get("complex_llm_model", "openrouter/openai/gpt-4o")

            llm_response = await agent_instance.a_run_llm_completion(
                model=model_to_use,
                messages=[{"role": "user", "content": reasoning_prompt}],
                temperature=0.2,  # Lower temperature for more focused execution
                # max_tokens=3072,
                node_name="LLMReasonerNode",
                stop="<immediate_context>",
                task_id=f"reasoning_loop_{self.current_loop_count}_step_{self.current_outline_step}"
            )

            # Add LLM response to context
            self.reasoning_context.append({
                "type": "reasoning",
                "content": llm_response,
                "loop": self.current_loop_count,
                "outline_step": self.current_outline_step,
                "timestamp": datetime.now().isoformat()
            })

            # Parse and execute meta-tool calls with enhanced tracking
            progress_made = await self._parse_and_execute_meta_tools(llm_response, prep_res)

            action_taken = progress_made.get("action_taken", False)
            actual_progress = progress_made.get("progress_made", False)

            # Update performance with correct progress indication.
            # NOTE(review): metrics are updated again below in the progress
            # monitoring branch — each loop is recorded twice; confirm.
            self._update_performance_metrics(loop_start_time, actual_progress)

            if not action_taken:
                # A text-only loop: count it against the reasoning budget and
                # force a final result when the outline is already exhausted
                # or the current step is flagged as final.
                self.current_reasoning_count += 1
                if self.current_outline_step > len(self.outline.get("steps", [])):
                    progress_made["final_result"] = llm_response
                    rprint("Final result reached forced by outline step count")
                if self.current_outline_step < len(self.outline.get("steps", [])) and self.outline.get("steps", [])[self.current_outline_step].get("is_final", False):
                    progress_made["final_result"] = llm_response
                    rprint("Final result reached forced by outline step count final step")
            else:
                # Action loops refund budget — see NOTE above the while loop.
                self.current_reasoning_count -= 1

            # Check for final result
            if progress_made.get("final_result"):
                final_result = progress_made["final_result"]
                await self._finalize_reasoning_session(prep_res, final_result)
                break

            # Progress monitoring
            if progress_made.get("action_taken"):
                consecutive_no_progress = 0
                self._update_performance_metrics(loop_start_time, True)
            else:
                consecutive_no_progress += 1
                self._update_performance_metrics(loop_start_time, False)

            # Check outline completion.
            # NOTE(review): the threshold is len(steps) + max_reasoning_loops,
            # not len(steps) — the forced completion fires well after the last
            # outline step; confirm the added slack is intentional.
            if self.outline and self.current_outline_step >= len(self.outline.get("steps", []))+self.max_reasoning_loops:
                # All outline steps completed, force final response
                final_result = await self._create_outline_completion_response(prep_res)
                break

            # Emergency break for excessive no-progress
            if consecutive_no_progress >= max_no_progress:
                await self._trigger_auto_recovery(prep_res)

        except Exception as e:
            # Delegate error bookkeeping, then bail out with an error
            # response once recovery attempts are used up.
            await self._handle_reasoning_error(e, prep_res, progress_tracker)
            import traceback
            print(traceback.format_exc())
            if self.auto_recovery_attempts >= self.max_auto_recovery:
                final_result = await self._create_error_response(original_query, str(e))
                break


    # If no final result after max loops, create a comprehensive summary
    if not final_result:
        final_result = await self._create_enhanced_timeout_response(original_query, prep_res)

    return {
        "final_result": final_result,
        "reasoning_loops": self.current_loop_count,
        "reasoning_context": self.reasoning_context.copy(),
        "internal_task_stack": self.internal_task_stack.copy(),
        "outline": self.outline,
        "outline_completion": self.current_outline_step,
        "performance_metrics": self.performance_metrics,
        "auto_recovery_attempts": self.auto_recovery_attempts
    }
post_async(shared, prep_res, exec_res) async

Enhanced post-processing with comprehensive data storage

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
6579
6580
6581
6582
6583
6584
6585
6586
6587
6588
6589
6590
6591
6592
6593
6594
6595
6596
6597
6598
6599
6600
6601
6602
6603
6604
6605
6606
6607
6608
6609
6610
6611
6612
6613
6614
6615
6616
6617
6618
6619
6620
6621
6622
6623
6624
6625
6626
async def post_async(self, shared, prep_res, exec_res):
    """Persist reasoning artifacts and publish the final response.

    Copies the reasoner's outputs from ``exec_res`` into ``shared``,
    records the session outcome in the variable system when one is
    attached, and signals flow completion.

    Args:
        shared: Mutable shared state updated in place.
        prep_res: Preparation payload (unused here).
        exec_res: Result dict produced by ``exec_async``.

    Returns:
        The flow transition string "reasoner_complete".
    """
    outcome = exec_res.get("final_result", "Task processing incomplete")
    loop_total = exec_res.get("reasoning_loops", 0)

    # Full trace of the reasoning run, kept for later inspection.
    shared["reasoning_artifacts"] = {
        "reasoning_loops": loop_total,
        "reasoning_context": exec_res.get("reasoning_context", []),
        "internal_task_stack": exec_res.get("internal_task_stack", []),
        "outline": exec_res.get("outline"),
        "outline_completion": exec_res.get("outline_completion", 0),
        "performance_metrics": exec_res.get("performance_metrics", {}),
        "auto_recovery_attempts": exec_res.get("auto_recovery_attempts", 0)
    }

    # Record the session result in the variable system, if available.
    if self.variable_manager:
        self.variable_manager.set("reasoning.current_session.final_data", {
            "final_result": outcome,
            "completion_timestamp": datetime.now().isoformat(),
            "total_loops": loop_total,
            "session_success": outcome != "Task processing incomplete",
            "outline_driven_execution": True
        })
        # Roll this run into global performance statistics.
        self._update_global_performance_stats(exec_res)

    # Publish the answer under both response keys.
    shared["llm_reasoner_result"] = outcome
    shared["current_response"] = outcome

    # Synthesis metadata for downstream consumers.
    shared["synthesized_response"] = {
        "synthesized_response": outcome,
        "confidence": self._calculate_confidence(exec_res),
        "metadata": {
            "synthesis_method": "outline_driven_reasoner",
            "reasoning_loops": loop_total,
            "outline_completion": exec_res.get("outline_completion", 0),
            "performance_score": self._calculate_performance_score(exec_res),
            "auto_recovery_used": exec_res.get("auto_recovery_attempts", 0) > 0
        }
    }

    return "reasoner_complete"
prep_async(shared) async

Enhanced initialization with variable system integration

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
4022
4023
4024
4025
4026
4027
4028
4029
4030
4031
4032
4033
4034
4035
4036
4037
4038
4039
4040
4041
4042
4043
4044
4045
4046
4047
4048
4049
4050
4051
4052
4053
4054
4055
4056
4057
4058
4059
4060
4061
4062
4063
4064
4065
4066
4067
4068
4069
4070
4071
4072
4073
4074
4075
4076
4077
4078
4079
4080
4081
async def prep_async(self, shared):
    """Reset per-run reasoner state and assemble the execution payload.

    Clears all loop/outline/performance bookkeeping from any previous run,
    wires in the variable manager (recording a session entry when one is
    present), and builds the dict consumed by ``exec_async``.

    Args:
        shared: Shared orchestration state.

    Returns:
        dict of everything ``exec_async`` needs: query, session id, agent
        and manager references, model names, context, and sub-system nodes.
    """
    # Fresh bookkeeping for this execution.
    self.reasoning_context = []
    self.internal_task_stack = []
    self.current_loop_count = 0
    self.current_reasoning_count = 0
    self.outline = None
    self.current_outline_step = 0
    self.step_completion_tracking = {}
    self.loop_detection_memory = []
    self.performance_metrics = {"loop_times": [], "progress_loops": 0, "total_loops": 0}
    self.auto_recovery_attempts = 0
    self.last_action_signatures = []

    agent = shared.get("agent_instance")
    self.agent_instance = agent

    # Prefer an explicitly shared manager, else the agent's own.
    # NOTE(review): the fallback expression is evaluated eagerly, so a
    # missing agent_instance raises AttributeError here — presumably the
    # orchestrator always supplies one; confirm.
    self.variable_manager = shared.get("variable_manager", agent.variable_manager)
    context_manager = shared.get("context_manager")

    if self.variable_manager:
        # Record this session in the variable system.
        self.variable_manager.set("reasoning.current_session", {
            "session_id": shared.get("session_id", "default"),
            "start_time": datetime.now().isoformat(),
            "query": shared.get("current_query", ""),
            "reasoning_mode": "outline_driven"
        })
        # Seed the run with previously successful patterns.
        self._load_historical_patterns()

    # Comprehensive system context via the UnifiedContextManager.
    system_context = await self._build_enhanced_system_context_unified(shared, context_manager)

    return {
        "original_query": shared.get("current_query", ""),
        "session_id": shared.get("session_id", "default"),
        "agent_instance": shared.get("agent_instance"),
        "variable_manager": self.variable_manager,
        "context_manager": context_manager,  # context manager reference
        "system_context": system_context,
        "available_tools": shared.get("available_tools", []),
        "tool_capabilities": shared.get("tool_capabilities", {}),
        "fast_llm_model": shared.get("fast_llm_model"),
        "complex_llm_model": shared.get("complex_llm_model"),
        "progress_tracker": shared.get("progress_tracker"),
        "formatted_context": shared.get("formatted_context", {}),
        "historical_context": await self._get_historical_context_unified(context_manager, shared.get("session_id")),
        "capabilities_summary": shared.get("capabilities_summary", ""),
        # Sub-system node references
        "llm_tool_node": shared.get("llm_tool_node_instance"),
        "task_planner": shared.get("task_planner_instance"),
        "task_executor": shared.get("task_executor_instance"),
    }
LLMTask dataclass

Bases: Task

Specialized task for LLM calls

Source code in toolboxv2/mods/isaa/base/Agent/types.py
470
471
472
473
474
475
476
477
478
479
480
@dataclass
class LLMTask(Task):
    """Specialized task for LLM calls.

    Extends the base ``Task`` with model-selection hints, a prompt
    template, keys for pulling context from shared state, and an optional
    JSON schema for validating the model output.
    """
    # Per-call model configuration; "model_preference" chooses the tier.
    llm_config: dict[str, Any] = field(default_factory=lambda: {
        "model_preference": "fast",  # "fast" | "complex"
        "temperature": 0.7,
        "max_tokens": 1024
    })
    # Prompt template; presumably rendered with shared-state values — TODO confirm.
    prompt_template: str = ""
    context_keys: list[str] = field(default_factory=list)  # Keys from shared state
    output_schema: dict | None = None  # JSON schema for output validation
LLMToolNode

Bases: AsyncNode

Enhanced LLM tool with automatic tool calling and agent loop integration

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
@with_progress_tracking
class LLMToolNode(AsyncNode):
    """Enhanced LLM tool with automatic tool calling and agent loop integration"""

    def __init__(self, model: str = None, max_tool_calls: int = 5, **kwargs):
        """Configure the tool-calling LLM node.

        Args:
            model: Explicit model identifier; any falsy value falls back to
                the COMPLEXMODEL env var (default "openrouter/qwen/qwen3-code").
            max_tool_calls: Upper bound on tool invocations per execution.
            **kwargs: Forwarded unchanged to ``AsyncNode``.
        """
        super().__init__(**kwargs)
        env_default = os.getenv("COMPLEXMODEL", "openrouter/qwen/qwen3-code")
        self.model = model or env_default
        self.max_tool_calls = max_tool_calls
        self.call_log = []

    async def prep_async(self, shared):
        """Collect the execution inputs for the tool-calling loop.

        Pulls the task description, formatted context, manager/agent
        references, tool metadata, and model names out of ``shared``.

        NOTE(review): assumes "variable_manager" and "agent_instance" are
        present in shared — a missing manager raises AttributeError when
        building the base system message; confirm callers guarantee this.
        """
        variable_manager = shared.get("variable_manager")
        agent_instance = shared.get("agent_instance")
        context = shared.get("formatted_context", {})

        task_description = shared.get("current_task_description", shared.get("current_query", ""))
        # Persona system message with variables resolved up front.
        persona_message = variable_manager.format_text(agent_instance.amd.get_system_message_with_persona())

        return {
            "task_description": task_description,
            "context": context,
            "context_manager": shared.get("context_manager"),
            "session_id": shared.get("session_id"),
            "variable_manager": variable_manager,
            "agent_instance": agent_instance,
            "available_tools": shared.get("available_tools", []),
            "tool_capabilities": shared.get("tool_capabilities", {}),
            "persona_config": shared.get("persona_config"),
            "base_system_message": persona_message,
            "recent_interaction": context.get("recent_interaction", ""),
            "session_summary": context.get("session_summary", ""),
            "task_context": context.get("task_context", ""),
            "fast_llm_model": shared.get("fast_llm_model"),
            "complex_llm_model": shared.get("complex_llm_model"),
            "progress_tracker": shared.get("progress_tracker"),
            "tool_call_count": 0,
        }

    async def exec_async(self, prep_res):
        """Main execution with tool calling loop.

        Repeatedly queries the LLM, extracts YAML TOOL_CALLS blocks from its
        replies, executes them, and feeds the results back — until the model
        answers without tool calls, the tool budget is spent, or an error
        occurs.

        Args:
            prep_res: Payload from ``prep_async``.

        Returns:
            dict with "final_response", call counts, the conversation
            history, the last model used, and LLM statistics.

        NOTE(review): total_llm_calls / total_cost / total_tokens are
        initialized but never incremented, so "llm_statistics" always
        reports zeros — confirm whether tracking was meant to be wired in.
        """
        if not LITELLM_AVAILABLE:
            return await self._fallback_response(prep_res)

        progress_tracker = prep_res.get("progress_tracker")

        conversation_history = []
        tool_call_count = 0
        final_response = None
        model_to_use = "auto"
        total_llm_calls = 0
        total_cost = 0.0
        total_tokens = 0

        # Initial system message with tool awareness
        system_message = self._build_tool_aware_system_message(prep_res)

        # Initial user prompt with variable resolution
        initial_prompt = await self._build_context_aware_prompt(prep_res)
        conversation_history.append({"role": "user", "content":  prep_res["variable_manager"].format_text(initial_prompt)})
        runs = 0
        while tool_call_count < self.max_tool_calls:
            runs += 1
            # Get LLM response.  The first round appends an extra nudge to the
            # system message (prompt text preserved verbatim, typos included).
            messages = [{"role": "system", "content": system_message + ( "\nfist look at the context and reason over you intal step." if runs == 1 else "")}] + conversation_history

            # Fast vs. complex model chosen per task complexity heuristic.
            model_to_use = self._select_optimal_model(prep_res["task_description"], prep_res)

            llm_start = time.perf_counter()

            try:
                agent_instance = prep_res["agent_instance"]
                response = await agent_instance.a_run_llm_completion(
                    model=model_to_use,
                    messages=messages,
                    temperature=0.7,
                    # max_tokens=2048,
                    node_name="LLMToolNode", task_id="llm_phase_" + str(runs)
                )

                llm_response = response
                # Empty reply with no prior result: give up with a stock error.
                if not llm_response and  not final_response:
                    final_response = "I encountered an error while processing your request."
                    break


                # Check for tool calls
                tool_calls = self._extract_tool_calls(llm_response)

                # Resolve variable placeholders before storing the turn.
                llm_response = prep_res["variable_manager"].format_text(llm_response)
                conversation_history.append({"role": "assistant", "content": llm_response})


                if not tool_calls:
                    # No more tool calls, this is the final response
                    final_response = llm_response
                    break

                # Execute tool calls
                tool_results = await self._execute_tool_calls(tool_calls, prep_res)
                tool_call_count += len(tool_calls)

                # Add tool results to conversation.
                # NOTE(review): final_response is provisionally set to the raw
                # tool results here, so hitting max_tool_calls returns tool
                # output rather than prose — confirm intended.
                tool_results_text = self._format_tool_results(tool_results)
                final_response = tool_results_text
                conversation_history.append({"role": "user",
                                             "content": f"Tool results:\n{tool_results_text}\n\nPlease continue with the next action do nor repeat or provide your final response."})

                # Update variable manager with tool results
                self._update_variables_with_results(tool_results, prep_res["variable_manager"])

            except Exception as e:
                llm_duration = time.perf_counter() - llm_start

                # Report the failure to the tracker before surfacing it.
                if progress_tracker:
                    await progress_tracker.emit_event(ProgressEvent(
                        event_type="llm_call",  # consistent event type
                        node_name="LLMToolNode",
                        session_id=prep_res.get("session_id"),
                        status=NodeStatus.FAILED,
                        success=False,
                        duration=llm_duration,
                        llm_model=model_to_use,
                        error_details={
                            "message": str(e),
                            "type": type(e).__name__
                        },
                        metadata={"call_number": total_llm_calls + 1}
                    ))
                eprint(f"LLM tool execution failed: {e}")
                final_response = f"I encountered an error while processing: {str(e)}"
                import traceback
                print(traceback.format_exc())
                break


        return {
            "success": True,
            "final_response": final_response or "I was unable to complete the request.",
            "tool_calls_made": tool_call_count,
            "conversation_history": conversation_history,
            "model_used": model_to_use,
            "llm_statistics": {
                "total_calls": total_llm_calls,
                "total_cost": total_cost,
                "total_tokens": total_tokens
            }
        }

    def _build_tool_aware_system_message(self, prep_res: dict) -> str:
        """Build a unified intelligent, tool-aware system message with context and relevance analysis.

        Assembled in three parts: (1) tool listing + YAML usage protocol,
        (2) variable-system context, (3) per-tool relevance analysis for the
        current query.  All prompt literals below are runtime strings and are
        preserved verbatim (including their typos, e.g. "crate"/"ar").
        """

        # Base system message
        base_message = prep_res.get("base_system_message", "You are a helpful AI assistant.")
        available_tools = prep_res.get("available_tools", [])
        tool_capabilities = prep_res.get("tool_capabilities", {})
        variable_manager = prep_res.get("variable_manager")
        context = prep_res.get("context", {})
        agent_instance = prep_res.get("agent_instance")
        query = prep_res.get('task_description', '').lower()

        # NOTE(review): no newline between "action pattern" and "**THINK**"
        # in the concatenated prompt — likely a missing "\n"; confirm.
        base_message += ("\n\nAlways follow this action pattern"
                         "**THINK** -> **PLAN** -> (**ADJUST**) and (**UPDATE**) or **ACT** using tools!\n"
                         "all progress must be stored to ( variable system, memory, external services )!\n"
                         "if working on code or file based tasks, update and crate the files!\n"
                         "all text only steps ar discarded and not stored! only the final response is stored! ")

        # --- Part 1: List available tools & capabilities ---
        if available_tools:
            base_message += f"\n\n## Available Tools\nYou have access to these tools: {', '.join(available_tools)}\n"
            base_message += "Results will be stored to results.{tool_name}.data"

            # Per-tool one-liners: primary function plus up to three use cases.
            for tool_name in available_tools:
                if tool_name in tool_capabilities:
                    cap = tool_capabilities[tool_name]
                    base_message += f"\n**{tool_name}**: {cap.get('primary_function', 'No description')}"
                    use_cases = cap.get('use_cases', [])
                    if use_cases:
                        base_message += f"\n  Use cases: {', '.join(use_cases[:3])}"

            # base_message += "\n\n## Tool Usage\nTo use tools, respond with:\nTOOL_CALL: tool_name(arg1='value1', arg2='value2')\nYou can make multiple tool calls in one response."
            # YAML call protocol consumed by _extract_tool_calls.
            base_message += """
## Tool Usage
To use tools, respond with a YAML block:
```yaml
TOOL_CALLS:
  - tool: tool_name
    args:
      arg1: value1
      arg2: value2
  - tool: another_tool
    args:
      code: |
        def example():
            return "multi-line code"
      text: |
        Multi-line text
        with arbitrary content
```
You can call multiple tools in one response. Use | for multi-line strings containing code or complex text."""

        # --- Part 2: Add variable context ---
        if variable_manager:
            var_context = variable_manager.get_llm_variable_context()
            if var_context:
                base_message += f"\n\n## Variable Context\n{var_context}"

        # --- Part 3: Intelligent tool analysis ---
        if not agent_instance or not hasattr(agent_instance, '_tool_capabilities'):
            return base_message + "\n\n⚠ No intelligent tool analysis available."

        capabilities = agent_instance._tool_capabilities
        analysis_parts = ["\n\n## Intelligent Tool Analysis"]

        for tool_name, cap in capabilities.items():
            analysis_parts.append(f"\n{tool_name}{cap.get('args_schema', '()')}:")
            analysis_parts.append(f"- Function: {cap.get('primary_function', 'Unknown')}")

            # Calculate relevance score for the current query
            relevance_score = self._calculate_tool_relevance(query, cap)
            analysis_parts.append(f"- Query relevance: {relevance_score:.2f}")

            if relevance_score > 0.65:
                analysis_parts.append("- ⭐ HIGHLY RELEVANT - SHOULD USE THIS TOOL!")

            # Trigger phrase matching (substring match against the query)
            triggers = cap.get('trigger_phrases', [])
            matched_triggers = [t for t in triggers if t.lower() in query]
            if matched_triggers:
                analysis_parts.append(f"- Matched triggers: {matched_triggers}")

            # Show top use cases
            use_cases = cap.get('use_cases', [])[:3]
            if use_cases:
                analysis_parts.append(f"- Use cases: {', '.join(use_cases)}")

        # Combine everything into a final message
        return base_message + "\n"+ "\n".join(analysis_parts)

    def _calculate_tool_relevance(self, query: str, capabilities: dict) -> float:
        """Score how relevant a tool is to *query*, clamped to [0, 1].

        Combines three weak signals from the capability metadata:
        word overlap with trigger phrases (+0.04 each), substring hits on
        confidence triggers (+confidence/10 each), and word overlap with
        indirect connections (+0.02 each).
        """
        query_tokens = set(query.lower().split())

        def shares_word(phrase: str) -> bool:
            # True when the phrase and the query have at least one word in common.
            return bool(set(phrase.lower().split()) & query_tokens)

        score = 0.0
        for trigger in capabilities.get('trigger_phrases', []):
            if shares_word(trigger):
                score += 0.04
        for phrase, confidence in capabilities.get('confidence_triggers', {}).items():
            if phrase.lower() in query:
                score += confidence / 10
        for link in capabilities.get('indirect_connections', []):
            if shares_word(link):
                score += 0.02
        return min(1.0, score)

    @staticmethod
    def _extract_tool_calls_custom(text: str) -> list[dict]:
        """Legacy extractor for ``TOOL_CALL:``-style invocations in *text*.

        Delegates matching to ``_extract_meta_tool_calls`` and argument
        parsing to ``_parse_tool_args``; unparseable calls are logged and
        skipped.
        """
        extracted: list[dict] = []
        for name, raw_args in _extract_meta_tool_calls(text, r'TOOL_CALL:'):
            try:
                extracted.append({
                    "tool_name": name,
                    "arguments": _parse_tool_args(raw_args),
                })
            except Exception as exc:
                wprint(f"Failed to parse tool call {name}: {exc}")
        return extracted

    @staticmethod
    def _extract_tool_calls(text: str) -> list[dict]:
        """Extract YAML-formatted TOOL_CALLS blocks from an LLM response.

        Prefers fenced ```yaml blocks; falls back to a bare "TOOL_CALLS:"
        section.  Malformed YAML is logged and skipped rather than raised.
        """
        import re

        import yaml

        # Fenced ```yaml ... ``` blocks containing a TOOL_CALLS key.
        blocks = re.findall(r'```yaml\s*\n(.*?TOOL_CALLS:.*?)\n```', text, re.DOTALL | re.IGNORECASE)

        # Fallback: an unfenced TOOL_CALLS: section up to the next flush-left line.
        if not blocks:
            bare_sections = re.findall(r'TOOL_CALLS:\s*\n((?:.*\n)*?)(?=\n\S|\Z)', text, re.MULTILINE)
            blocks = [f"TOOL_CALLS:\n{section}" for section in bare_sections]

        extracted: list[dict] = []
        for block in blocks:
            try:
                payload = yaml.safe_load(block)
                if not isinstance(payload, dict) or 'TOOL_CALLS' not in payload:
                    continue

                entries = payload['TOOL_CALLS']
                if not isinstance(entries, list):
                    entries = [entries]  # Handle single tool call

                for entry in entries:
                    if isinstance(entry, dict) and 'tool' in entry:
                        extracted.append({
                            "tool_name": entry['tool'],
                            "arguments": entry.get('args', {}),
                        })

            except yaml.YAMLError as e:
                wprint(f"Failed to parse YAML tool calls: {e}")
            except Exception as e:
                wprint(f"Error processing tool calls: {e}")

        return extracted

    def _select_optimal_model(self, task_description: str, prep_res: dict) -> str:
        """Pick the complex model for hard tasks (score > 0.7), else the fast one."""
        needs_complex = self._estimate_task_complexity(task_description, prep_res) > 0.7
        key, fallback = (
            ("complex_llm_model", "openrouter/openai/gpt-4o")
            if needs_complex
            else ("fast_llm_model", "openrouter/anthropic/claude-3-haiku")
        )
        return prep_res.get(key, fallback)

    def _estimate_task_complexity(self, task_description: str, prep_res: dict) -> float:
        """Heuristic complexity estimate in [0, 1].

        Blends three capped signals: description length (weight 0.5),
        presence of "complex work" keywords (0.3), and the number of
        available tools (0.2).  Result is rounded to two decimals.
        """
        lowered = task_description.lower()
        length_component = min(len(task_description) / 500, 1.0)  # cap at 1.0
        signal_words = ["analyze", "research", "generate", "simulate", "complex", "deep", "strategy"]
        keyword_component = sum(word in lowered for word in signal_words) / len(signal_words)
        tool_component = min(len(prep_res.get("available_tools", [])) / 10, 1.0)

        # Weighted sum of the three components.
        blended = (0.5 * length_component) + (0.3 * keyword_component) + (0.2 * tool_component)
        return round(blended, 2)

    async def _fallback_response(self, prep_res: dict) -> dict:
        """Return a canned failure payload when LiteLLM is unavailable."""
        wprint("LiteLLM not available — using fallback response.")
        message = (
            "I'm unable to process this request fully right now because the LLM interface "
            "is not available. Please try again later or check system configuration."
        )
        return {
            "success": False,
            "final_response": message,
            "tool_calls_made": 0,
            "conversation_history": [],
            "model_used": None,
        }

    async def _execute_tool_calls(self, tool_calls: list[dict], prep_res: dict) -> list[dict]:
        """Execute a batch of tool calls via the agent instance.

        Args:
            tool_calls: Dicts with "tool_name" and "arguments" keys (as
                produced by _extract_tool_calls).
            prep_res: Shared preparation context; provides "agent_instance",
                "variable_manager", "progress_tracker" and "session_id".

        Returns:
            One dict per call: {"tool_name", "arguments", "success", and
            either "result" (on success) or "error" (on failure)}.
        """
        agent_instance = prep_res.get("agent_instance")
        variable_manager = prep_res.get("variable_manager")
        progress_tracker = prep_res.get("progress_tracker")

        results = []

        for tool_call in tool_calls:
            tool_name = tool_call["tool_name"]
            arguments = tool_call["arguments"]

            # Start tool timing for progress events
            tool_start = time.perf_counter()

            if progress_tracker:
                await progress_tracker.emit_event(ProgressEvent(
                    event_type="tool_call",
                    timestamp=time.time(),
                    status=NodeStatus.RUNNING,
                    node_name="LLMToolNode",
                    tool_name=tool_name,
                    tool_args=arguments,
                    session_id=prep_res.get("session_id"),
                    metadata={"tool_call_initiated": True}
                ))

            try:
                # Resolve variable references in string-valued arguments
                if variable_manager:
                    resolved_args = {}
                    for key, value in arguments.items():
                        if isinstance(value, str):
                            resolved_args[key] = variable_manager.format_text(value)
                        else:
                            resolved_args[key] = value
                else:
                    resolved_args = arguments

                # Execute via agent
                result = await agent_instance.arun_function(tool_name, **resolved_args)
                tool_duration = time.perf_counter() - tool_start
                # FIXED: guard the store — the unconditional call crashed with
                # AttributeError when variable_manager was None, even though
                # the resolution branch above explicitly supports that case.
                if variable_manager:
                    variable_manager.set(f"results.{tool_name}.data", result)
                if progress_tracker:
                    await progress_tracker.emit_event(ProgressEvent(
                        event_type="tool_call",
                        timestamp=time.time(),
                        node_name="LLMToolNode",
                        status=NodeStatus.COMPLETED,
                        tool_name=tool_name,
                        tool_args=resolved_args,
                        tool_result=result,
                        duration=tool_duration,
                        success=True,
                        session_id=prep_res.get("session_id"),
                        metadata={
                            "result_type": type(result).__name__,
                            "result_length": len(str(result))
                        }
                    ))
                results.append({
                    "tool_name": tool_name,
                    "arguments": resolved_args,
                    "success": True,
                    "result": result
                })

            except Exception as e:
                tool_duration = time.perf_counter() - tool_start
                error_message = str(e)
                error_type = type(e).__name__
                import traceback
                print(traceback.format_exc())

                if progress_tracker:
                    await progress_tracker.emit_event(ProgressEvent(
                        event_type="tool_call",
                        timestamp=time.time(),
                        node_name="LLMToolNode",
                        status=NodeStatus.FAILED,
                        tool_name=tool_name,
                        tool_args=arguments,
                        duration=tool_duration,
                        success=False,
                        tool_error=error_message,
                        session_id=prep_res.get("session_id"),
                        metadata={
                            "error": error_message,
                            "error_message": error_message,
                            "error_type": error_type
                        }
                    ))

                    # Also send generic error event for the error log
                    await progress_tracker.emit_event(ProgressEvent(
                        event_type="error",
                        timestamp=time.time(),
                        node_name="LLMToolNode",
                        status=NodeStatus.FAILED,
                        success=False,
                        tool_name=tool_name,
                        metadata={
                            "error": error_message,
                            "error_message": error_message,
                            "error_type": error_type,
                            "source": "tool_execution",
                            "tool_name": tool_name,
                            "tool_args": arguments
                        }
                    ))
                eprint(f"Tool execution failed {tool_name}: {e}")
                results.append({
                    "tool_name": tool_name,
                    "arguments": arguments,
                    "success": False,
                    "error": str(e)
                })

        return results

    def _format_tool_results(self, results: list[dict]) -> str:
        """Format tool results for LLM"""
        formatted = []

        for result in results:
            if result["success"]:
                formatted.append(f"✓ {result['tool_name']}: {result['result']}")
            else:
                formatted.append(f"✗ {result['tool_name']}: ERROR - {result['error']}")

        return "\n".join(formatted)

    def _update_variables_with_results(self, results: list[dict], variable_manager):
        """Update variable manager with tool results"""
        if not variable_manager:
            return

        for i, result in enumerate(results):
            if result["success"]:
                tool_name = result['tool_name']
                result_data = result['result']

                # FIXED: Store result in proper variable paths
                variable_manager.set(f"results.{tool_name}.data", result_data)
                variable_manager.set(f"tools.{tool_name}.result", result_data)

                # Also store with index for multiple calls to same tool
                var_key = f"tool_result_{tool_name}_{i}"
                variable_manager.set(var_key, result_data)

    async def _build_context_aware_prompt(self, prep_res: dict) -> str:
        """Build the context-aware user prompt with UnifiedContextManager integration.

        Assembles, in order: unified session context (chat history, execution
        state, recent results, world-model facts), then the legacy context
        sections (recent interaction, session summary, task context), the
        current task description, and variable-name suggestions. The final
        text is passed through the variable manager for placeholder
        resolution when one is available.

        Args:
            prep_res: Preparation context; reads "variable_manager",
                "context_manager", "session_id", "task_description" and the
                legacy context strings.

        Returns:
            The fully assembled prompt string.
        """
        variable_manager = prep_res.get("variable_manager")
        agent_instance = prep_res.get("agent_instance")  # NOTE(review): unused in this method
        context = prep_res.get("context", {})  # NOTE(review): unused in this method

        # Get unified context manager if available
        context_manager = prep_res.get("context_manager")
        session_id = prep_res.get("session_id", "default")

        unified_context_parts = []

        if context_manager:
            try:
                # Get unified context for LLM tool usage
                unified_context = await context_manager.build_unified_context(session_id, prep_res.get("task_description", ""))

                # Format unified context for LLM consumption
                chat_history = unified_context.get("chat_history", [])
                if chat_history:
                    unified_context_parts.append("## Recent Conversation from Session")
                    for msg in chat_history[-5:]:  # Last 5 messages
                        timestamp = msg.get('timestamp', '')[:19]  # ISO timestamp truncated to seconds
                        role = msg.get('role', 'unknown')
                        content = msg.get('content', '')[:300] + ("..." if len(msg.get('content', '')) > 300 else "")
                        unified_context_parts.append(f"[{timestamp}] {role}: {content}")

                # Execution state from unified context
                execution_state = unified_context.get("execution_state", {})
                if execution_state:
                    system_status = execution_state.get('system_status', 'unknown')
                    active_tasks = execution_state.get('active_tasks', [])
                    recent_completions = execution_state.get('recent_completions', [])

                    unified_context_parts.append("\n## Current System State")
                    unified_context_parts.append(f"Status: {system_status}")
                    if active_tasks:
                        unified_context_parts.append(f"Active Tasks: {len(active_tasks)}")
                    if recent_completions:
                        unified_context_parts.append(f"Recent Completions: {len(recent_completions)}")

                # Available results from unified context
                variables_context = unified_context.get("variables", {})
                recent_results = variables_context.get("recent_results", [])
                if recent_results:
                    unified_context_parts.append("\n## Available Results")
                    for result in recent_results[:3]:  # Top 3 results
                        task_id = result.get("task_id", "unknown")
                        preview = result.get("preview", "")[:100] + "..."  # NOTE(review): ellipsis is appended even for short previews
                        success = "✅" if result.get("success") else "❌"
                        unified_context_parts.append(f"{success} {task_id}: {preview}")

                # World model facts from unified context
                relevant_facts = unified_context.get("relevant_facts", [])
                if relevant_facts:
                    unified_context_parts.append("\n## Relevant Known Facts")
                    # NOTE(review): assumes relevant_facts is a list of (key, value) pairs — confirm
                    for key, value in relevant_facts[:3]:  # Top 3 facts
                        fact_preview = str(value)[:100] + ("..." if len(str(value)) > 100 else "")
                        unified_context_parts.append(f"- {key}: {fact_preview}")

            except Exception as e:
                # Degrade gracefully: surface the failure inside the prompt instead of aborting
                unified_context_parts.append(f"## Context Error\nUnified context unavailable: {str(e)}")

        # EXISTING: Keep legacy context building (backwards compatibility)
        prompt_parts = []

        # Add unified context first (primary)
        if unified_context_parts:
            prompt_parts.extend(unified_context_parts)

        # Add existing context sections (secondary)
        recent_interaction = prep_res.get("recent_interaction", "")
        session_summary = prep_res.get("session_summary", "")
        task_context = prep_res.get("task_context", "")

        if recent_interaction:
            prompt_parts.append(f"\n## Recent Interaction Context\n{recent_interaction}")
            # NOTE(review): the instruction below is emitted verbatim to the LLM and contains typos
            prompt_parts.append("\n**Important**: NO META_TOOL_CALLs needed in this section! and not avalabel\n use tools from Intelligent Tool Analysis only!")
        if session_summary:
            prompt_parts.append(f"\n## Session Summary\n{session_summary}")
        if task_context:
            prompt_parts.append(f"\n## Task Context\n{task_context}")

        # Add main task
        task_description = prep_res.get("task_description", "")
        if task_description:
            prompt_parts.append(f"\n## Current Request\n{task_description}")

        # Variable suggestions (existing functionality)
        if variable_manager and task_description:
            suggestions = variable_manager.get_variable_suggestions(task_description)
            if suggestions:
                prompt_parts.append(f"\n## Available Variables\nYou can use: {', '.join(suggestions)}")

        # Final variable resolution
        final_prompt = "\n".join(prompt_parts)
        if variable_manager:
            final_prompt = variable_manager.format_text(final_prompt)

        return final_prompt

    async def post_async(self, shared, prep_res, exec_res):
        """Publish the LLM-tool phase results into shared state and route on.

        Confidence is 0.7 when the complex model produced the answer, 0.6 for
        the fast model, and 0 when execution did not succeed.
        """
        final_text = exec_res.get("final_response", "Task completed.")
        used_model = exec_res.get("model_used")

        if exec_res.get("success", False):
            confidence = 0.7 if used_model == prep_res.get("complex_llm_model") else 0.6
        else:
            confidence = 0

        shared["current_response"] = final_text
        shared["tool_calls_made"] = exec_res.get("tool_calls_made", 0)
        shared["llm_tool_conversation"] = exec_res.get("conversation_history", [])
        shared["synthesized_response"] = {
            "synthesized_response": final_text,
            "confidence": confidence,
            "metadata": exec_res.get("metadata", {"model_used": used_model}),
            "synthesis_method": "llm_tool",
        }
        return "llm_tool_complete"
exec_async(prep_res) async

Main execution with tool calling loop

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
async def exec_async(self, prep_res):
    """Main execution with tool calling loop.

    Repeatedly queries the LLM, extracts tool calls from its output,
    executes them, feeds results back, and stops when the model produces a
    response without tool calls or the max_tool_calls budget is exhausted.

    Args:
        prep_res: Preparation context; reads "agent_instance",
            "variable_manager", "task_description", "progress_tracker",
            "session_id" and the model configuration keys.

    Returns:
        Result dict with "final_response", "tool_calls_made",
        "conversation_history", "model_used" and "llm_statistics".
    """
    if not LITELLM_AVAILABLE:
        return await self._fallback_response(prep_res)

    progress_tracker = prep_res.get("progress_tracker")

    conversation_history = []
    tool_call_count = 0
    final_response = None
    model_to_use = "auto"
    total_llm_calls = 0
    # NOTE(review): cost/token totals are reported but never populated here;
    # a_run_llm_completion returns only the response text — confirm whether
    # usage accounting happens inside the agent instance instead.
    total_cost = 0.0
    total_tokens = 0

    # Initial system message with tool awareness
    system_message = self._build_tool_aware_system_message(prep_res)

    # Initial user prompt with variable resolution
    initial_prompt = await self._build_context_aware_prompt(prep_res)
    conversation_history.append({"role": "user", "content": prep_res["variable_manager"].format_text(initial_prompt)})
    runs = 0
    while tool_call_count < self.max_tool_calls:
        runs += 1
        # On the first round, nudge the model to reason before acting.
        # FIXED: corrected typos in the prompt ("fist ... you intal step").
        first_round_hint = "\nFirst look at the context and reason over your initial step." if runs == 1 else ""
        messages = [{"role": "system", "content": system_message + first_round_hint}] + conversation_history

        model_to_use = self._select_optimal_model(prep_res["task_description"], prep_res)

        llm_start = time.perf_counter()

        try:
            agent_instance = prep_res["agent_instance"]
            response = await agent_instance.a_run_llm_completion(
                model=model_to_use,
                messages=messages,
                temperature=0.7,
                # max_tokens=2048,
                node_name="LLMToolNode", task_id="llm_phase_" + str(runs)
            )
            total_llm_calls += 1  # FIXED: counter was declared but never incremented

            llm_response = response
            if not llm_response and not final_response:
                final_response = "I encountered an error while processing your request."
                break

            # Check for tool calls
            tool_calls = self._extract_tool_calls(llm_response)

            llm_response = prep_res["variable_manager"].format_text(llm_response)
            conversation_history.append({"role": "assistant", "content": llm_response})

            if not tool_calls:
                # No more tool calls, this is the final response
                final_response = llm_response
                break

            # Execute tool calls
            tool_results = await self._execute_tool_calls(tool_calls, prep_res)
            tool_call_count += len(tool_calls)

            # Add tool results to conversation; keep them as a provisional
            # final answer in case the loop exits on the call budget.
            tool_results_text = self._format_tool_results(tool_results)
            final_response = tool_results_text
            # FIXED: corrected "do nor repeat" typo in the follow-up prompt.
            conversation_history.append({"role": "user",
                                         "content": f"Tool results:\n{tool_results_text}\n\nPlease continue with the next action, do not repeat, or provide your final response."})

            # Update variable manager with tool results
            self._update_variables_with_results(tool_results, prep_res["variable_manager"])

        except Exception as e:
            llm_duration = time.perf_counter() - llm_start

            if progress_tracker:
                await progress_tracker.emit_event(ProgressEvent(
                    event_type="llm_call",  # consistent event type
                    node_name="LLMToolNode",
                    session_id=prep_res.get("session_id"),
                    status=NodeStatus.FAILED,
                    success=False,
                    duration=llm_duration,
                    llm_model=model_to_use,
                    error_details={
                        "message": str(e),
                        "type": type(e).__name__
                    },
                    metadata={"call_number": total_llm_calls + 1}
                ))
            eprint(f"LLM tool execution failed: {e}")
            final_response = f"I encountered an error while processing: {str(e)}"
            import traceback
            print(traceback.format_exc())
            break

    return {
        "success": True,
        "final_response": final_response or "I was unable to complete the request.",
        "tool_calls_made": tool_call_count,
        "conversation_history": conversation_history,
        "model_used": model_to_use,
        "llm_statistics": {
            "total_calls": total_llm_calls,
            "total_cost": total_cost,
            "total_tokens": total_tokens
        }
    }
PersonaConfig dataclass
Source code in toolboxv2/mods/isaa/base/Agent/types.py
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
@dataclass
class PersonaConfig:
    """Configuration for an agent persona.

    Controls the persona's identity, communication style, and how/where the
    persona is applied (system prompt, post-processing, or both), plus an
    optional response-format configuration.
    """
    name: str
    style: str = "professional"
    personality_traits: list[str] = field(default_factory=lambda: ["helpful", "concise"])
    tone: str = "friendly"
    response_format: str = "direct"
    custom_instructions: str = ""

    # Optional response-format settings; created lazily by update_format()
    format_config: FormatConfig | None = None

    apply_method: str = "system_prompt"  # "system_prompt" | "post_process" | "both"
    integration_level: str = "light"  # "light" | "medium" | "heavy"

    def to_system_prompt_addition(self) -> str:
        """Convert persona to system prompt addition with format integration.

        Returns an empty string unless apply_method targets the system prompt.
        """
        if self.apply_method in ["system_prompt", "both"]:
            additions = []
            additions.append(f"You are {self.name}.")
            additions.append(f"Your communication style is {self.style} with a {self.tone} tone.")

            if self.personality_traits:
                traits_str = ", ".join(self.personality_traits)
                additions.append(f"Your key traits are: {traits_str}.")

            if self.custom_instructions:
                additions.append(self.custom_instructions)

            # Append format-specific instructions last
            if self.format_config:
                additions.append("\n" + self.format_config.get_combined_instructions())

            return " ".join(additions)
        return ""

    def update_format(self, response_format: ResponseFormat|str, text_length: TextLength|str, custom_instructions: str = ""):
        """Dynamically update the response format and text length.

        Accepts either enum members or their string values; raises ValueError
        on an unrecognized string.
        """
        try:
            format_enum = ResponseFormat(response_format) if isinstance(response_format, str) else response_format
            length_enum = TextLength(text_length) if isinstance(text_length, str) else text_length

            # Create the config lazily on first use
            if not self.format_config:
                self.format_config = FormatConfig()

            self.format_config.response_format = format_enum
            self.format_config.text_length = length_enum

            if custom_instructions:
                self.format_config.custom_instructions = custom_instructions


        except ValueError:
            raise ValueError(f"Invalid format '{response_format}' or length '{text_length}'")

    def should_post_process(self) -> bool:
        """Check if post-processing should be applied"""
        return self.apply_method in ["post_process", "both"]
should_post_process()

Check if post-processing should be applied

Source code in toolboxv2/mods/isaa/base/Agent/types.py
751
752
753
def should_post_process(self) -> bool:
    """Return True when the persona's output requires a post-processing pass."""
    applicable_modes = ("post_process", "both")
    return self.apply_method in applicable_modes
to_system_prompt_addition()

Convert persona to system prompt addition with format integration

Source code in toolboxv2/mods/isaa/base/Agent/types.py
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
def to_system_prompt_addition(self) -> str:
    """Render this persona as a system-prompt fragment.

    Returns an empty string when apply_method does not target the system
    prompt; otherwise joins identity, style, traits, custom instructions
    and any format-config instructions with single spaces.
    """
    if self.apply_method not in ["system_prompt", "both"]:
        return ""

    parts = [
        f"You are {self.name}.",
        f"Your communication style is {self.style} with a {self.tone} tone.",
    ]

    if self.personality_traits:
        parts.append(f"Your key traits are: {', '.join(self.personality_traits)}.")

    if self.custom_instructions:
        parts.append(self.custom_instructions)

    # Format-specific rules go last, prefixed by a newline as before.
    if self.format_config:
        parts.append("\n" + self.format_config.get_combined_instructions())

    return " ".join(parts)
update_format(response_format, text_length, custom_instructions='')

Dynamische Format-Aktualisierung

Source code in toolboxv2/mods/isaa/base/Agent/types.py
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
def update_format(self, response_format: ResponseFormat|str, text_length: TextLength|str, custom_instructions: str = ""):
    """Dynamically switch the persona's response format and text length.

    Strings are converted to their enum members; a string that matches no
    member raises ValueError with both offending values.
    """
    try:
        fmt = ResponseFormat(response_format) if isinstance(response_format, str) else response_format
        length = TextLength(text_length) if isinstance(text_length, str) else text_length

        # Create the format config lazily on first use.
        if not self.format_config:
            self.format_config = FormatConfig()

        self.format_config.response_format = fmt
        self.format_config.text_length = length

        if custom_instructions:
            self.format_config.custom_instructions = custom_instructions

    except ValueError:
        raise ValueError(f"Invalid format '{response_format}' or length '{text_length}'")
ProgressEvent dataclass

Enhanced progress event with better error handling

Source code in toolboxv2/mods/isaa/base/Agent/types.py
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
@dataclass
class ProgressEvent:

    """Enhanced progress event with better error handling.

    A single event emitted by agent nodes to report lifecycle progress
    (LLM calls, tool calls, routing, errors). Provides dict round-tripping
    (to_dict / from_dict) with enum- and dataclass-aware serialization,
    plus display helpers for chat and sidebar views.
    """

    # === 1. Core attributes (present on every event) ===
    event_type: str
    node_name: str
    timestamp: float = field(default_factory=time.time)
    event_id: str = field(default_factory=lambda: str(uuid.uuid4()))
    session_id: Optional[str] = None

    # === 2. Status and result attributes ===
    status: Optional[NodeStatus] = None
    success: Optional[bool] = None
    duration: Optional[float] = None
    error_details: dict[str, Any] = field(default_factory=dict)  # structured: message, type, traceback

    # === 3. LLM-specific attributes ===
    llm_model: Optional[str] = None
    llm_prompt_tokens: Optional[int] = None
    llm_completion_tokens: Optional[int] = None
    llm_total_tokens: Optional[int] = None
    llm_cost: Optional[float] = None
    llm_input: Optional[Any] = None  # optional, for debugging; can be large
    llm_output: Optional[str] = None # optional, for debugging; can be large

    # === 4. Tool-specific attributes ===
    tool_name: Optional[str] = None
    is_meta_tool: Optional[bool] = None
    tool_args: Optional[dict[str, Any]] = None
    tool_result: Optional[Any] = None
    tool_error: Optional[str] = None
    llm_temperature: Optional[float]  = None

    # === 5. Strategy and context attributes ===
    agent_name: Optional[str] = None
    task_id: Optional[str] = None
    plan_id: Optional[str] = None


    # Node/Routing data
    routing_decision: Optional[str] = None
    node_phase: Optional[str] = None
    node_duration: Optional[float] = None

    # === 6. Metadata (for everything else) ===
    metadata: dict[str, Any] = field(default_factory=dict)


    def __post_init__(self):
        """Normalize defaults and derive status/success from metadata errors."""

        # Guards below matter when a caller (e.g. from_dict) passes None explicitly,
        # bypassing the default_factory.
        if self.timestamp is None:
            self.timestamp = time.time()

        if self.metadata is None:
            self.metadata = {}
        if not self.event_id:
            self.event_id = f"{self.node_name}_{self.event_type}_{int(self.timestamp * 1000000)}"
        # An error recorded in metadata forces FAILED status and mirrors the
        # error info into error_details.
        if 'error' in self.metadata or 'error_type' in self.metadata:
            if self.error_details is None:
                self.error_details = {}
            self.error_details['error'] = self.metadata.get('error')
            self.error_details['error_type'] = self.metadata.get('error_type')
            self.status = NodeStatus.FAILED
        # Keep success consistent with terminal statuses.
        if self.status == NodeStatus.FAILED:
            self.success = False
        if self.status == NodeStatus.COMPLETED:
            self.success = True

    def _to_dict(self) -> dict[str, Any]:
        """Convert ProgressEvent to dictionary with proper handling of all field types"""
        result = {}

        # Get all fields from the dataclass
        # NOTE(review): the loop variable shadows dataclasses.field within this method
        for field in fields(self):
            value = getattr(self, field.name)

            # Handle None values
            if value is None:
                result[field.name] = None
                continue

            # Handle NodeStatus enum
            # NOTE(review): if NodeStatus subclasses Enum this check reduces to Enum — confirm
            if isinstance(value, NodeStatus | Enum):
                result[field.name] = value.value
            # Handle dataclass objects
            elif is_dataclass(value):
                result[field.name] = asdict(value)
            # Handle dictionaries (recursively process nested enums/dataclasses)
            elif isinstance(value, dict):
                result[field.name] = self._process_dict(value)
            # Handle lists (recursively process nested items)
            elif isinstance(value, list):
                result[field.name] = self._process_list(value)
            # Handle primitive types
            else:
                result[field.name] = value

        return result

    def _process_dict(self, d: dict[str, Any]) -> dict[str, Any]:
        """Recursively process dictionary values"""
        result = {}
        for k, v in d.items():
            if isinstance(v, Enum):
                result[k] = v.value
            elif is_dataclass(v):
                result[k] = asdict(v)
            elif isinstance(v, dict):
                result[k] = self._process_dict(v)
            elif isinstance(v, list):
                result[k] = self._process_list(v)
            else:
                result[k] = v
        return result

    def _process_list(self, lst: list[Any]) -> list[Any]:
        """Recursively process list items"""
        result = []
        for item in lst:
            if isinstance(item, Enum):
                result.append(item.value)
            elif is_dataclass(item):
                result.append(asdict(item))
            elif isinstance(item, dict):
                result.append(self._process_dict(item))
            elif isinstance(item, list):
                result.append(self._process_list(item))
            else:
                result.append(item)
        return result

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> 'ProgressEvent':
        """Create ProgressEvent from dictionary.

        Unknown keys are dropped; an invalid status string becomes None;
        a missing/None metadata becomes an empty dict.
        """
        # Create a copy to avoid modifying the original
        data_copy = dict(data)

        # Handle NodeStatus enum conversion from string back to enum
        if 'status' in data_copy and data_copy['status'] is not None:
            if isinstance(data_copy['status'], str):
                try:
                    data_copy['status'] = NodeStatus(data_copy['status'])
                except (ValueError, TypeError):
                    # If invalid status value, set to None
                    data_copy['status'] = None

        # Filter out any keys that aren't valid dataclass fields
        field_names = {field.name for field in fields(cls)}
        filtered_data = {k: v for k, v in data_copy.items() if k in field_names}

        # Ensure metadata is properly initialized
        if 'metadata' not in filtered_data or filtered_data['metadata'] is None:
            filtered_data['metadata'] = {}

        return cls(**filtered_data)

    def to_dict(self) -> dict[str, Any]:
        """Return event data with None values removed for compact display"""
        data = self._to_dict()

        # Recursively strip None, empty dicts/lists and empty strings.
        def clean_dict(d):
            if isinstance(d, dict):
                return {k: clean_dict(v) for k, v in d.items()
                        if v is not None and v != {} and v != [] and v != ''}
            elif isinstance(d, list):
                cleaned_list = [clean_dict(item) for item in d if item is not None]
                return [item for item in cleaned_list if item != {} and item != []]
            return d

        return clean_dict(data)

    def get_chat_display_data(self) -> dict[str, Any]:
        """Get data optimized for chat view display"""
        # NOTE(review): filter_none_values is not defined in this class body —
        # presumably attached elsewhere; verify before relying on it.
        filtered = self.filter_none_values()

        # Core fields always shown
        core_data = {
            'event_type': filtered.get('event_type'),
            'node_name': filtered.get('node_name'),
            'timestamp': filtered.get('timestamp'),
            'event_id': filtered.get('event_id'),
            'status': filtered.get('status')
        }

        # Add specific fields based on event type
        if self.event_type == 'outline_created':
            if 'metadata' in filtered:
                core_data['outline_steps'] = len(filtered['metadata'].get('outline', []))
        elif self.event_type == 'reasoning_loop':
            if 'metadata' in filtered:
                core_data.update({
                    'loop_number': filtered['metadata'].get('loop_number'),
                    'outline_step': filtered['metadata'].get('outline_step'),
                    'context_size': filtered['metadata'].get('context_size')
                })
        elif self.event_type == 'tool_call':
            core_data.update({
                'tool_name': filtered.get('tool_name'),
                'is_meta_tool': filtered.get('is_meta_tool')
            })
        elif self.event_type == 'llm_call':
            core_data.update({
                'llm_model': filtered.get('llm_model'),
                'llm_total_tokens': filtered.get('llm_total_tokens'),
                'llm_cost': filtered.get('llm_cost')
            })

        # Remove None values from core_data
        return {k: v for k, v in core_data.items() if v is not None}

    def get_detailed_display_data(self) -> dict[str, Any]:
        """Get complete filtered data for detailed popup view"""
        return self.filter_none_values()

    def get_progress_summary(self) -> str:
        """Get a brief summary for progress sidebar"""
        if self.event_type == 'reasoning_loop' and 'metadata' in self.filter_none_values():
            metadata = self.filter_none_values()['metadata']
            loop_num = metadata.get('loop_number', '?')
            step = metadata.get('outline_step', '?')
            return f"Loop {loop_num}, Step {step}"
        elif self.event_type == 'tool_call':
            tool_name = self.tool_name or 'Unknown Tool'
            return f"{'Meta ' if self.is_meta_tool else ''}{tool_name}"
        elif self.event_type == 'llm_call':
            model = self.llm_model or 'Unknown Model'
            tokens = self.llm_total_tokens
            return f"{model} ({tokens} tokens)" if tokens else model
        else:
            return self.event_type.replace('_', ' ').title()
from_dict(data) classmethod

Create ProgressEvent from dictionary

Source code in toolboxv2/mods/isaa/base/Agent/types.py
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
@classmethod
def from_dict(cls, data: dict[str, Any]) -> 'ProgressEvent':
    """Build a ProgressEvent from a plain dictionary.

    Unknown keys are dropped, a string ``status`` is converted back into a
    NodeStatus enum (invalid values become None), and ``metadata`` is
    guaranteed to be a dict. The input dictionary is never mutated.
    """
    payload = {**data}

    # Convert a serialized status string back into its enum member.
    status = payload.get('status')
    if isinstance(status, str):
        try:
            payload['status'] = NodeStatus(status)
        except (ValueError, TypeError):
            # Unknown/invalid status values degrade to None.
            payload['status'] = None

    # Keep only keys that correspond to actual dataclass fields.
    valid_names = {f.name for f in fields(cls)}
    payload = {key: value for key, value in payload.items() if key in valid_names}

    # Guarantee a usable metadata dict.
    if payload.get('metadata') is None:
        payload['metadata'] = {}

    return cls(**payload)
get_chat_display_data()

Get data optimized for chat view display

Source code in toolboxv2/mods/isaa/base/Agent/types.py
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
def get_chat_display_data(self) -> dict[str, Any]:
    """Get data optimized for chat view display.

    Always includes the core identity fields, augments them with
    event-type-specific details, and strips None values at the end.
    """
    filtered = self.filter_none_values()

    # Core fields shown for every event type.
    display = {
        key: filtered.get(key)
        for key in ('event_type', 'node_name', 'timestamp', 'event_id', 'status')
    }

    # Event-type-specific enrichment.
    event_type = self.event_type
    if event_type == 'outline_created':
        metadata = filtered.get('metadata')
        if metadata is not None:
            display['outline_steps'] = len(metadata.get('outline', []))
    elif event_type == 'reasoning_loop':
        metadata = filtered.get('metadata')
        if metadata is not None:
            for key in ('loop_number', 'outline_step', 'context_size'):
                display[key] = metadata.get(key)
    elif event_type == 'tool_call':
        for key in ('tool_name', 'is_meta_tool'):
            display[key] = filtered.get(key)
    elif event_type == 'llm_call':
        for key in ('llm_model', 'llm_total_tokens', 'llm_cost'):
            display[key] = filtered.get(key)

    # Drop anything that resolved to None.
    return {key: value for key, value in display.items() if value is not None}
get_detailed_display_data()

Get complete filtered data for detailed popup view

Source code in toolboxv2/mods/isaa/base/Agent/types.py
263
264
265
def get_detailed_display_data(self) -> dict[str, Any]:
    """Return the full None-filtered event payload for the detail popup."""
    detailed = self.filter_none_values()
    return detailed
get_progress_summary()

Get a brief summary for progress sidebar

Source code in toolboxv2/mods/isaa/base/Agent/types.py
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
def get_progress_summary(self) -> str:
    """Get a brief one-line summary for the progress sidebar.

    Returns a loop/step label for reasoning loops, the tool name for tool
    calls, model + token count for LLM calls, and a title-cased event
    type otherwise.
    """
    if self.event_type == 'reasoning_loop':
        # PERF FIX: filter once instead of calling filter_none_values() twice.
        filtered = self.filter_none_values()
        if 'metadata' in filtered:
            metadata = filtered['metadata']
            loop_num = metadata.get('loop_number', '?')
            step = metadata.get('outline_step', '?')
            return f"Loop {loop_num}, Step {step}"
        # No metadata after filtering -> generic label, same as other events.
        return self.event_type.replace('_', ' ').title()
    elif self.event_type == 'tool_call':
        tool_name = self.tool_name or 'Unknown Tool'
        return f"{'Meta ' if self.is_meta_tool else ''}{tool_name}"
    elif self.event_type == 'llm_call':
        model = self.llm_model or 'Unknown Model'
        tokens = self.llm_total_tokens
        return f"{model} ({tokens} tokens)" if tokens else model
    else:
        return self.event_type.replace('_', ' ').title()
to_dict()

Return event data with None values removed for compact display

Source code in toolboxv2/mods/isaa/base/Agent/types.py
209
210
211
212
213
214
215
216
217
218
219
220
221
222
def to_dict(self) -> dict[str, Any]:
    """Return event data with None values removed for compact display.

    Recursively drops None, empty dicts/lists and empty strings. Note
    that container members are filtered on their *original* value before
    recursion, matching a shallow-filter-then-recurse strategy.
    """
    payload = self._to_dict()

    def _prune(value):
        if isinstance(value, dict):
            kept = {k: v for k, v in value.items()
                    if v is not None and v != {} and v != [] and v != ''}
            return {k: _prune(v) for k, v in kept.items()}
        if isinstance(value, list):
            pruned_items = [_prune(item) for item in value if item is not None]
            return [item for item in pruned_items if item != {} and item != []]
        return value

    return _prune(payload)
ProgressTracker

Advanced progress tracking with cost calculation

Source code in toolboxv2/mods/isaa/base/Agent/types.py
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
class ProgressTracker:
    """Advanced progress tracking with cost calculation.

    Records ProgressEvents, times named operations, and estimates LLM
    cost either via litellm's pricing tables or a static per-token
    fallback.
    """

    def __init__(self, progress_callback: callable = None, agent_name="unknown"):
        # Callback may be sync or async; emit_event supports both.
        self.progress_callback = progress_callback
        self.events: list[ProgressEvent] = []
        self.active_timers: dict[str, float] = {}

        # Fallback *per-token* prices used when litellm cannot price the call.
        self.token_costs = {
            "input": 0.00001,  # $0.01 per 1K input tokens
            "output": 0.00003,  # $0.03 per 1K output tokens
        }
        self.agent_name = agent_name

    async def emit_event(self, event: ProgressEvent):
        """Store the event, tag it with this tracker's agent name and
        forward it to the configured callback.

        Callback failures are printed and never propagate, so a faulty
        observer cannot break the agent run.
        """
        self.events.append(event)
        event.agent_name = self.agent_name

        if self.progress_callback:
            try:
                if asyncio.iscoroutinefunction(self.progress_callback):
                    await self.progress_callback(event)
                else:
                    self.progress_callback(event)
            except Exception:
                import traceback
                print(traceback.format_exc())

    def start_timer(self, key: str) -> float:
        """Start timing an operation; returns the perf_counter start time."""
        start_time = time.perf_counter()
        self.active_timers[key] = start_time
        return start_time

    def end_timer(self, key: str) -> float:
        """End a timing operation and return the elapsed seconds.

        Returns 0.0 for unknown keys instead of raising.
        """
        if key not in self.active_timers:
            return 0.0
        duration = time.perf_counter() - self.active_timers.pop(key)
        return duration

    def calculate_llm_cost(self, model: str, input_tokens: int, output_tokens: int, completion_response: Any = None) -> float:
        """Calculate approximate LLM cost in USD.

        Prefers litellm's pricing tables; falls back to the static
        per-token rates in ``self.token_costs`` when litellm is missing
        or cannot price the call.
        """
        try:
            import litellm
            # BUG FIX (robustness): litellm.completion_cost raises more than
            # ImportError (unknown model, bad response); any failure falls back.
            return litellm.completion_cost(model=model, completion_response=completion_response)
        except Exception:
            pass
        # BUG FIX: token_costs are per-token rates (0.00001 == $0.01/1K), so the
        # previous extra division by 1000 understated costs by 1000x.
        input_cost = input_tokens * self.token_costs["input"]
        output_cost = output_tokens * self.token_costs["output"]
        return input_cost + output_cost

    def get_summary(self) -> dict[str, Any]:
        """Get comprehensive progress summary across all recorded events."""
        summary = {
            "total_events": len(self.events),
            "llm_calls": len([e for e in self.events if e.event_type == "llm_call"]),
            "tool_calls": len([e for e in self.events if e.event_type == "tool_call"]),
            "total_cost": sum(e.llm_cost for e in self.events if e.llm_cost),
            "total_tokens": sum(e.llm_total_tokens for e in self.events if e.llm_total_tokens),
            "total_duration": sum(e.node_duration for e in self.events if e.node_duration),
            "nodes_visited": list(set(e.node_name for e in self.events)),
            "tools_used": list(set(e.tool_name for e in self.events if e.tool_name)),
            "models_used": list(set(e.llm_model for e in self.events if e.llm_model))
        }
        return summary
calculate_llm_cost(model, input_tokens, output_tokens, completion_response=None)

Calculate approximate LLM cost

Source code in toolboxv2/mods/isaa/base/Agent/types.py
329
330
331
332
333
334
335
336
337
338
339
340
def calculate_llm_cost(self, model: str, input_tokens: int, output_tokens: int, completion_response: Any = None) -> float:
    """Calculate approximate LLM cost in USD.

    Prefers litellm's pricing tables; falls back to the static per-token
    rates in ``self.token_costs`` when litellm is missing or cannot
    price the call.
    """
    try:
        import litellm
        # BUG FIX (robustness): litellm.completion_cost raises more than
        # ImportError (unknown model, bad response); any failure falls back.
        return litellm.completion_cost(model=model, completion_response=completion_response)
    except Exception:
        pass
    # BUG FIX: token_costs are per-token rates (0.00001 == $0.01/1K), so the
    # previous extra division by 1000 understated costs by 1000x.
    input_cost = input_tokens * self.token_costs["input"]
    output_cost = output_tokens * self.token_costs["output"]
    return input_cost + output_cost
emit_event(event) async

Emit progress event with callback and storage

Source code in toolboxv2/mods/isaa/base/Agent/types.py
299
300
301
302
303
304
305
306
307
308
309
310
311
312
async def emit_event(self, event: ProgressEvent):
    """Store the event, tag it with the tracker's agent name and forward
    it to the configured callback (sync or async).

    Callback failures are printed and never propagate.
    """
    self.events.append(event)
    event.agent_name = self.agent_name

    callback = self.progress_callback
    if not callback:
        return

    try:
        if asyncio.iscoroutinefunction(callback):
            await callback(event)
        else:
            callback(event)
    except Exception:
        import traceback
        print(traceback.format_exc())
end_timer(key)

End timing operation and return duration

Source code in toolboxv2/mods/isaa/base/Agent/types.py
321
322
323
324
325
326
327
def end_timer(self, key: str) -> float:
    """End a timing operation and return the elapsed seconds.

    Unknown keys yield 0.0 instead of raising; a finished timer is
    removed from the active set.
    """
    started = self.active_timers.pop(key, None)
    if started is None:
        return 0.0
    return time.perf_counter() - started
get_summary()

Get comprehensive progress summary

Source code in toolboxv2/mods/isaa/base/Agent/types.py
342
343
344
345
346
347
348
349
350
351
352
353
354
355
def get_summary(self) -> dict[str, Any]:
    """Aggregate counts, costs, tokens and durations over all recorded events."""
    events = self.events
    return {
        "total_events": len(events),
        "llm_calls": sum(1 for e in events if e.event_type == "llm_call"),
        "tool_calls": sum(1 for e in events if e.event_type == "tool_call"),
        "total_cost": sum(e.llm_cost for e in events if e.llm_cost),
        "total_tokens": sum(e.llm_total_tokens for e in events if e.llm_total_tokens),
        "total_duration": sum(e.node_duration for e in events if e.node_duration),
        "nodes_visited": list({e.node_name for e in events}),
        "tools_used": list({e.tool_name for e in events if e.tool_name}),
        "models_used": list({e.llm_model for e in events if e.llm_model}),
    }
start_timer(key)

Start timing operation

Source code in toolboxv2/mods/isaa/base/Agent/types.py
315
316
317
318
319
def start_timer(self, key: str) -> float:
    """Record and return the perf_counter start time for *key*."""
    now = time.perf_counter()
    self.active_timers[key] = now
    return now
ResponseFinalProcessorNode

Bases: AsyncNode

Finale Verarbeitung mit Persona-System

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
3883
3884
3885
3886
3887
3888
3889
3890
3891
3892
3893
3894
3895
3896
3897
3898
3899
3900
3901
3902
3903
3904
3905
3906
3907
3908
3909
3910
3911
3912
3913
3914
3915
3916
3917
3918
3919
3920
3921
3922
3923
3924
3925
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941
3942
3943
3944
3945
3946
3947
3948
3949
3950
3951
3952
3953
3954
3955
3956
3957
3958
3959
3960
3961
3962
3963
3964
3965
3966
3967
3968
3969
3970
3971
3972
3973
3974
3975
3976
3977
3978
3979
3980
3981
@with_progress_tracking
class ResponseFinalProcessorNode(AsyncNode):
    """Final response processing with optional persona styling.

    Takes the formatted response, optionally restyles it via an LLM call
    according to the configured persona, and attaches final metadata.
    """

    async def prep_async(self, shared):
        """Collect everything the final processing step needs from shared state."""
        return {
            "formatted_response": shared.get("formatted_response", {}),
            "quality_assessment": shared.get("quality_assessment", {}),
            "conversation_history": shared.get("conversation_history", []),
            "persona": shared.get("persona_config"),
            "fast_llm_model": shared.get("fast_llm_model"),
            "use_fast_response": shared.get("use_fast_response", True),
            "agent_instance": shared.get("agent_instance"),
        }

    async def exec_async(self, prep_res):
        """Produce the final response text plus processing metadata."""
        response_data = prep_res["formatted_response"]
        raw_response = response_data.get("formatted_response", "I apologize, but I couldn't generate a response.")

        # Persona-based restyling only when a persona is configured and LLM access exists.
        if prep_res.get("persona") and LITELLM_AVAILABLE:
            final_response = await self._apply_persona_style(raw_response, prep_res)
        else:
            final_response = raw_response

        # Final metadata for observability/debugging.
        processing_metadata = {
            "response_confidence": response_data.get("confidence", 0.0),
            "quality_score": prep_res.get("quality_assessment", {}).get("quality_score", 0.0),
            "processing_timestamp": datetime.now().isoformat(),
            "response_length": len(final_response),
            "persona_applied": prep_res.get("persona") is not None
        }

        return {
            "final_response": final_response,
            "metadata": processing_metadata,
            "status": "completed"
        }

    async def _apply_persona_style(self, response: str, prep_res: dict) -> str:
        """Restyle *response* according to the persona's integration level.

        Returns the unstyled response unchanged when the persona opts out
        of post-processing or when the LLM call fails.
        """
        persona = prep_res["persona"]

        # Only apply when the persona is configured for post-processing.
        if not persona.should_post_process():
            return response

        # Prompt depth and token budget scale with the integration level.
        if persona.integration_level == "light":
            style_prompt = f"Make this {persona.tone} and {persona.style}: {response}"
            max_tokens = 400
        elif persona.integration_level == "medium":
            style_prompt = f"""
    Apply {persona.name} persona (style: {persona.style}, tone: {persona.tone}) to:
    {response}

    Keep the same information, adjust presentation:"""
            max_tokens = 600
        else:  # heavy
            style_prompt = f"""
Completely transform as {persona.name}:
Style: {persona.style}, Tone: {persona.tone}
Traits: {', '.join(persona.personality_traits)}
Instructions: {persona.custom_instructions}

Original: {response}

As {persona.name}:"""
            max_tokens = 1000

        try:
            # BUG FIX: shared.get(...) stores None under "fast_llm_model" when the
            # key is unset, and dict.get's default is NOT used for present-but-None
            # values. Use `or` so the fallback model actually applies.
            model_to_use = prep_res.get("fast_llm_model") or "openrouter/anthropic/claude-3-haiku"
            agent_instance = prep_res["agent_instance"]
            if prep_res.get("use_fast_response", True):
                response = await agent_instance.a_run_llm_completion(
                    model=model_to_use,
                    messages=[{"role": "user", "content": style_prompt}],
                    temperature=0.5,
                    max_tokens=max_tokens, node_name="PersonaStylingNode", task_id="persona_styling_fast"
                )
            else:
                response = await agent_instance.a_run_llm_completion(
                    model=model_to_use,
                    messages=[{"role": "user", "content": style_prompt}],
                    temperature=0.6,
                    max_tokens=max_tokens + 200, node_name="PersonaStylingNode", task_id="persona_styling_ritch"
                )

            return response.strip()

        except Exception as e:
            # Styling is best-effort: fall back to the unstyled response.
            wprint(f"Persona styling failed: {e}")
            return response

    async def post_async(self, shared, prep_res, exec_res):
        """Publish the final response and its metadata into shared state."""
        shared["current_response"] = exec_res["final_response"]
        shared["response_metadata"] = exec_res["metadata"]
        return "response_ready"
ResponseFormatterNode

Bases: AsyncNode

Formatiere finale Antwort für Benutzer

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
3527
3528
3529
3530
3531
3532
3533
3534
3535
3536
3537
3538
3539
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
@with_progress_tracking
class ResponseFormatterNode(AsyncNode):
    """Format the synthesized answer for the user.

    Strips the raw synthesis output and appends transparency notes for
    low-confidence or fallback-generated responses.
    """

    async def prep_async(self, shared):
        """Pull synthesis output, original query and user preferences from shared state."""
        return {
            "synthesized_response": shared.get("synthesized_response", {}),
            "original_query": shared.get("current_query", ""),
            "user_preferences": shared.get("user_preferences", {})
        }

    async def exec_async(self, prep_res):
        """Apply base formatting plus confidence/adaptation annotations."""
        synthesis_data = prep_res["synthesized_response"]
        raw_response = synthesis_data.get("synthesized_response", "")

        if not raw_response:
            # BUG FIX (consistency): return the same keys as the success path so
            # downstream consumers (quality check, final processor) always see a
            # uniform result shape.
            return {
                "formatted_response": "I apologize, but I was unable to generate a meaningful response to your query.",
                "confidence": 0.0,
                "metadata": {
                    "synthesis_method": synthesis_data.get("synthesis_method", "unknown"),
                    "response_length": 0
                }
            }

        # Base formatting
        formatted_response = raw_response.strip()

        # Surface low confidence to the user for transparency/debugging.
        confidence = synthesis_data.get("confidence", 0.0)
        if confidence < 0.4:
            formatted_response += "\n\n*Note: This response has low confidence due to limited information.*"

        adaptation_note = ""
        synthesis_method = synthesis_data.get("synthesis_method", "unknown")
        if synthesis_method == "fallback":
            adaptation_note = "\n\n*Note: Response generated with limited processing capabilities.*"

        return {
            "formatted_response": formatted_response + adaptation_note,
            "confidence": confidence,
            "metadata": {
                "synthesis_method": synthesis_method,
                "response_length": len(formatted_response)
            }
        }

    async def post_async(self, shared, prep_res, exec_res):
        """Store the formatted result for the quality-check stage."""
        shared["formatted_response"] = exec_res
        return "formatted"
ResponseGenerationFlow

Bases: AsyncFlow

Intelligente Antwortgenerierung basierend auf Task-Ergebnissen

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
@with_progress_tracking
class ResponseGenerationFlow(AsyncFlow):
    """Intelligent response generation based on task results.

    Wires five nodes into a pipeline — context aggregation -> synthesis ->
    formatting -> quality check -> final (persona) processing — with retry
    and fallback edges keyed by the action strings each node returns.
    """

    def __init__(self, tools=None):
        # NOTE(review): `tools` is accepted but unused here — presumably kept for
        # signature parity with sibling flows; confirm before removing.
        # Nodes of the response pipeline
        self.context_aggregator = ContextAggregatorNode()
        self.result_synthesizer = ResultSynthesizerNode()
        self.response_formatter = ResponseFormatterNode()
        self.quality_checker = ResponseQualityNode()
        self.final_processor = ResponseFinalProcessorNode()

        # === RESPONSE GENERATION PIPELINE ===
        # The `node - "action" >> next_node` syntax registers the transition taken
        # when `node` returns that action string from post_async.

        # Context aggregation -> synthesis
        self.context_aggregator - "context_ready" >> self.result_synthesizer
        self.context_aggregator - "no_context" >> self.response_formatter  # fallback: format without synthesis

        # Synthesis -> formatting (failed synthesis still gets formatted)
        self.result_synthesizer - "synthesized" >> self.response_formatter
        self.result_synthesizer - "synthesis_failed" >> self.response_formatter

        # Formatting -> quality check
        self.response_formatter - "formatted" >> self.quality_checker
        self.response_formatter - "format_failed" >> self.final_processor  # skip quality check

        # Quality check -> final processing, or retry synthesis on poor quality
        self.quality_checker - "quality_good" >> self.final_processor
        self.quality_checker - "quality_poor" >> self.result_synthesizer  # retry synthesis
        self.quality_checker - "quality_acceptable" >> self.final_processor

        super().__init__(start=self.context_aggregator)
ResponseQualityNode

Bases: AsyncNode

Prüfe Qualität der generierten Antwort

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763
3764
3765
3766
3767
3768
3769
3770
3771
3772
3773
3774
3775
3776
3777
3778
3779
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797
3798
3799
3800
3801
3802
3803
3804
3805
3806
3807
3808
3809
3810
3811
3812
3813
3814
3815
3816
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836
3837
3838
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855
3856
3857
3858
3859
3860
3861
3862
3863
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873
3874
3875
3876
3877
3878
3879
3880
@with_progress_tracking
class ResponseQualityNode(AsyncNode):
    """Check the quality of the generated response.

    Combines four weighted signals — heuristic base quality, format
    adherence, length adherence and an optional LLM assessment — into a
    single score and maps it to a routing action.
    """

    async def prep_async(self, shared):
        """Gather the response, query, format config and LLM access from shared state."""
        return {
            "formatted_response": shared.get("formatted_response", {}),
            "original_query": shared.get("current_query", ""),
            "format_config": self._get_format_config(shared),
            "fast_llm_model": shared.get("fast_llm_model"),
            "persona_config": shared.get("persona_config"),
            "agent_instance": shared.get("agent_instance"),
        }

    def _get_format_config(self, shared) -> FormatConfig | None:
        """Extract the format configuration from the persona, if present."""
        persona = shared.get("persona_config")
        if persona and hasattr(persona, 'format_config'):
            return persona.format_config
        return None

    async def exec_async(self, prep_res):
        """Score the response and derive an assessment plus improvement suggestions."""
        response_data = prep_res["formatted_response"]
        response_text = response_data.get("formatted_response", "")
        original_query = prep_res["original_query"]
        format_config = prep_res["format_config"]

        # Heuristic base quality
        base_quality = self._heuristic_quality_check(response_text, original_query)

        # Format-specific adherence
        format_quality = await self._evaluate_format_adherence(response_text, format_config)

        # Length adherence
        length_quality = self._evaluate_length_adherence(response_text, format_config)

        # LLM-based overall assessment (only for longer responses, to save calls)
        llm_quality = 0.5
        if LITELLM_AVAILABLE and len(response_text) > 500:
            llm_quality = await self._llm_format_quality_check(
                response_text, original_query, format_config, prep_res
            )

        # Weighted total score
        total_quality = (
            base_quality * 0.3 +
            format_quality * 0.3 +
            length_quality * 0.2 +
            llm_quality * 0.2
        )

        quality_details = {
            "total_score": total_quality,
            "base_quality": base_quality,
            "format_adherence": format_quality,
            "length_adherence": length_quality,
            "llm_assessment": llm_quality,
            "format_config_used": format_config is not None
        }

        return {
            "quality_score": total_quality,
            "quality_assessment": self._score_to_assessment(total_quality),
            "quality_details": quality_details,
            "suggestions": self._generate_format_quality_suggestions(
                total_quality, response_text, format_config, quality_details
            )
        }

    async def _evaluate_format_adherence(self, response: str, format_config: FormatConfig | None) -> float:
        """Score how well *response* matches the requested format (0.0-1.0)."""
        if not format_config:
            return 0.8  # Neutral score when no format is required

        format_type = format_config.response_format
        score = 0.5

        # Format-specific checks
        if format_type == ResponseFormat.WITH_TABLES:
            if '|' in response or 'Table:' in response or '| ' in response:
                score += 0.4

        elif format_type == ResponseFormat.WITH_BULLET_POINTS:
            bullet_count = response.count('•') + response.count('-') + response.count('*')
            if bullet_count >= 2:
                score += 0.4
            elif bullet_count >= 1:
                score += 0.2

        elif format_type == ResponseFormat.WITH_LISTS:
            list_patterns = ['1.', '2.', '3.', 'a)', 'b)', 'c)']
            list_score = sum(1 for pattern in list_patterns if pattern in response)
            score += min(0.4, list_score * 0.1)

        elif format_type == ResponseFormat.MD_TEXT:
            md_elements = ['#', '**', '*', '`', '```', '[', ']', '(', ')']
            md_score = sum(1 for element in md_elements if element in response)
            score += min(0.4, md_score * 0.05)

        elif format_type == ResponseFormat.YAML_TEXT:
            if response.strip().startswith(('```yaml', '---')) or ': ' in response:
                score += 0.4

        elif format_type == ResponseFormat.JSON_TEXT:
            if response.strip().startswith(('{', '[')):
                try:
                    json.loads(response)
                    score += 0.4
                except ValueError:
                    # BUG FIX: narrowed from a bare `except:` — json.loads failures
                    # are ValueError (JSONDecodeError); don't swallow interrupts.
                    score += 0.1  # Partial credit for JSON-like structure

        elif format_type == ResponseFormat.TEXT_ONLY:
            # Penalize if formatting elements are present
            format_elements = ['#', '*', '|', '```', '1.', '•', '-']
            format_count = sum(1 for element in format_elements if element in response)
            score += max(0.1, 0.5 - format_count * 0.05)

        elif format_type == ResponseFormat.PSEUDO_CODE:
            code_indicators = ['if ', 'for ', 'while ', 'def ', 'return ', 'function', 'BEGIN', 'END']
            code_score = sum(1 for indicator in code_indicators if indicator in response)
            score += min(0.4, code_score * 0.1)

        return max(0.0, min(1.0, score))

    def _evaluate_length_adherence(self, response: str, format_config: FormatConfig | None) -> float:
        """Score how well the word count fits the configured range (0.0-1.0)."""
        if not format_config:
            return 0.8

        word_count = len(response.split())
        min_words, max_words = format_config.get_expected_word_range()

        if min_words <= word_count <= max_words:
            return 1.0
        elif word_count < min_words:
            # Too short — gentle penalty proportional to the shortfall
            ratio = word_count / min_words
            return max(0.3, ratio * 0.8)
        else:  # word_count > max_words
            # Too long — penalized less than too short
            excess_ratio = (word_count - max_words) / max_words
            return max(0.4, 1.0 - excess_ratio * 0.3)

    async def _llm_format_quality_check(
        self,
        response: str,
        query: str,
        format_config: FormatConfig | None,
        prep_res: dict
    ) -> float:
        """LLM-based format and quality assessment; neutral 0.6 on failure."""
        if not format_config:
            return await self._standard_llm_quality_check(response, query, prep_res)

        format_desc = format_config.get_format_instructions()
        length_desc = format_config.get_length_instructions()

        prompt = f"""
Bewerte diese Antwort auf einer Skala von 0.0 bis 1.0 basierend auf Format-Einhaltung und Qualität:

Benutzer-Anfrage: {query}

Antwort: {response}

Erwartetes Format: {format_desc}
Erwartete Länge: {length_desc}

Bewertungskriterien:
1. Format-Einhaltung (40%): Entspricht die Antwort dem geforderten Format?
2. Längen-Angemessenheit (25%): Ist die Länge angemessen?
3. Inhaltliche Qualität (25%): Beantwortet die Anfrage vollständig?
4. Lesbarkeit und Struktur (10%): Ist die Antwort gut strukturiert?

Antworte nur mit einer Zahl zwischen 0.0 und 1.0:"""

        try:
            # BUG FIX: dict.get's default is ignored when the key exists with a
            # None value (shared.get stores None for unset models) — use `or`.
            model_to_use = prep_res.get("fast_llm_model") or "openrouter/anthropic/claude-3-haiku"
            agent_instance = prep_res["agent_instance"]
            score_text = (await agent_instance.a_run_llm_completion(
                model=model_to_use,
                messages=[{"role": "user", "content": prompt}],
                temperature=0.1,
                max_tokens=10,
                node_name="QualityAssessmentNode", task_id="format_quality_assessment"
            )).strip()

            # ROBUSTNESS: clamp — the model may answer slightly outside [0, 1].
            return max(0.0, min(1.0, float(score_text)))

        except Exception as e:
            wprint(f"LLM format quality check failed: {e}")
            return 0.6  # Neutral fallback

    def _generate_format_quality_suggestions(
        self,
        score: float,
        response: str,
        format_config: FormatConfig | None,
        quality_details: dict
    ) -> list[str]:
        """Generate format-specific improvement suggestions."""
        suggestions = []

        if not format_config:
            return ["Consider defining a specific response format for better consistency"]

        # Format-specific suggestions
        if quality_details["format_adherence"] < 0.6:
            format_type = format_config.response_format

            if format_type == ResponseFormat.WITH_TABLES:
                suggestions.append("Add tables using markdown format (| Column | Column |)")
            elif format_type == ResponseFormat.WITH_BULLET_POINTS:
                suggestions.append("Use bullet points (•, -, *) to structure information")
            elif format_type == ResponseFormat.MD_TEXT:
                suggestions.append("Use markdown formatting (headers, bold, code blocks)")
            elif format_type == ResponseFormat.YAML_TEXT:
                suggestions.append("Format response as valid YAML structure")
            elif format_type == ResponseFormat.JSON_TEXT:
                suggestions.append("Format response as valid JSON")

        # Length-specific suggestions
        if quality_details["length_adherence"] < 0.6:
            word_count = len(response.split())
            min_words, max_words = format_config.get_expected_word_range()

            if word_count < min_words:
                suggestions.append(f"Response too short ({word_count} words). Aim for {min_words}-{max_words} words")
            else:
                suggestions.append(f"Response too long ({word_count} words). Aim for {min_words}-{max_words} words")

        # Overall quality suggestions
        if score < 0.5:
            suggestions.append("Overall quality needs improvement - consider regenerating")
        elif score < 0.7:
            suggestions.append("Good response but could be enhanced with better format adherence")

        return suggestions

    async def _standard_llm_quality_check(self, response: str, query: str, prep_res: dict) -> float:
        """Standard LLM quality check without a format focus."""
        # Delegates to the existing generic implementation.
        return await self._llm_quality_check(response, query, prep_res)

    def _heuristic_quality_check(self, response: str, query: str) -> float:
        """Cheap heuristic quality score based on length, term coverage and structure."""
        score = 0.5  # Base score

        # Length check
        if len(response) < 50:
            score -= 0.3
        elif len(response) > 100:
            score += 0.2

        # Query term coverage
        query_terms = set(query.lower().split())
        response_terms = set(response.lower().split())
        coverage = len(query_terms.intersection(response_terms)) / max(len(query_terms), 1)
        score += coverage * 0.3

        # Structure indicators
        if any(indicator in response for indicator in [":", "-", "1.", "•"]):
            score += 0.1  # Structured response bonus

        return max(0.0, min(1.0, score))

    async def _llm_quality_check(self, response: str, query: str, prep_res: dict) -> float:
        """LLM-based generic quality score; neutral 0.5 on any failure."""
        try:
            prompt = f"""
Rate the quality of this response to the user's query on a scale of 0.0 to 1.0.

User Query: {query}

Response: {response}

Consider:
- Relevance to the query
- Completeness of information
- Clarity and readability
- Accuracy (if verifiable)

Respond with just a number between 0.0 and 1.0:"""

            # BUG FIX: use `or` so the fallback model applies even when the key
            # is present with a None value.
            model_to_use = prep_res.get("fast_llm_model") or "openrouter/anthropic/claude-3-haiku"
            agent_instance = prep_res["agent_instance"]
            score_text = (await agent_instance.a_run_llm_completion(
                model=model_to_use,
                messages=[{"role": "user", "content": prompt}],
                temperature=0.1,
                max_tokens=10,
                node_name="QualityAssessmentNode", task_id="quality_assessment"
            )).strip()

            # ROBUSTNESS: clamp — the model may answer slightly outside [0, 1].
            return max(0.0, min(1.0, float(score_text)))

        except Exception:
            # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are no longer swallowed.
            return 0.5  # Fallback score

    def _score_to_assessment(self, score: float) -> str:
        """Map a numeric score to the routing action used by the flow."""
        if score >= 0.8:
            return "quality_good"
        elif score >= 0.5:
            return "quality_acceptable"
        else:
            return "quality_poor"

    async def post_async(self, shared, prep_res, exec_res):
        """Store the assessment and return it as the flow routing action."""
        shared["quality_assessment"] = exec_res
        return exec_res["quality_assessment"]
ResultSynthesizerNode

Bases: AsyncNode

Synthetisiere finale Antwort aus allen Ergebnissen

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
3382
3383
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524
@with_progress_tracking
class ResultSynthesizerNode(AsyncNode):
    """Synthesize the final answer from all collected results.

    Merges successful task results, key discoveries, plan adaptations and
    failed attempts into a single response via the complex LLM model.
    Falls back to a template-based synthesis when litellm is unavailable
    or the LLM call fails.
    """

    async def prep_async(self, shared):
        """Collect the aggregated context and model/agent handles for synthesis."""
        return {
            "aggregated_context": shared.get("aggregated_context", {}),
            "fast_llm_model": shared.get("fast_llm_model"),
            "complex_llm_model": shared.get("complex_llm_model"),
            "agent_instance": shared.get("agent_instance")
        }

    async def exec_async(self, prep_res):
        """Run LLM-based synthesis; fall back to templates on any failure."""
        if not LITELLM_AVAILABLE:
            return await self._fallback_synthesis(prep_res)

        context = prep_res["aggregated_context"]
        # Persona text is only injected when the persona does not post-process
        # the response itself. BUGFIX: default to '' (not None) so the f-string
        # below never renders the literal string "None" into the prompt.
        persona = (prep_res['agent_instance'].amd.persona.to_system_prompt_addition() if not prep_res['agent_instance'].amd.persona.should_post_process() else '') if prep_res['agent_instance'].amd.persona else ''
        prompt = f"""
Du bist ein Experte für Informationssynthese. Erstelle eine umfassende, hilfreiche Antwort basierend auf den gesammelten Ergebnissen.

## Ursprüngliche Anfrage
{context.get('original_query', '')}

## Erfolgreiche Ergebnisse
{self._format_successful_results(context.get('successful_results', {}))}

## Wichtige Entdeckungen
{self._format_key_discoveries(context.get('key_discoveries', []))}

## Plan-Adaptationen
{context.get('adaptation_summary', 'No adaptations were needed.')}

## Fehlgeschlagene Versuche
{self._format_failed_attempts(context.get('failed_attempts', {}))}

{persona}

## Anweisungen
1. Gib eine direkte, hilfreiche Antwort auf die ursprüngliche Anfrage
2. Integriere alle relevanten gefundenen Informationen
3. Erkläre kurz den Prozess, falls Adaptationen nötig waren
4. Sei ehrlich über Limitationen oder fehlende Informationen
5. Strukturiere die Antwort logisch und lesbar

Erstelle eine finale Antwort:"""

        try:
            # Use the complex model for the final synthesis step.
            model_to_use = prep_res.get("complex_llm_model", "openrouter/openai/gpt-4o")
            agent_instance = prep_res["agent_instance"]
            synthesized_response = await agent_instance.a_run_llm_completion(
                model=model_to_use,
                messages=[{"role": "user", "content": prompt}],
                temperature=0.3,
                max_tokens=1500,
                node_name="ResultSynthesizerNode", task_id="response_synthesis"
            )

            return {
                "synthesized_response": synthesized_response,
                "synthesis_method": "llm",
                "model_used": model_to_use,
                "confidence": self._estimate_synthesis_confidence(context)
            }

        except Exception as e:
            eprint(f"LLM synthesis failed: {e}")
            return await self._fallback_synthesis(prep_res)

    def _format_successful_results(self, results: dict) -> str:
        """Render successful task results as prompt bullet points (truncated)."""
        formatted = []
        for _task_id, result_info in results.items():
            formatted.append(f"- {result_info['task_description']}: {str(result_info['result'])[:20000]}...")
        return "\n".join(formatted) if formatted else "No successful results to report."

    def _format_key_discoveries(self, discoveries: list) -> str:
        """Render key discoveries with their confidence values."""
        formatted = []
        for discovery in discoveries:
            confidence = discovery.get('confidence', 0.0)
            formatted.append(f"- {discovery['discovery']} (Confidence: {confidence:.2f})")
        return "\n".join(formatted) if formatted else "No key discoveries."

    def _format_failed_attempts(self, failed: dict) -> str:
        """Render failed attempts as description/error bullet points."""
        if not failed:
            return "No significant failures."
        formatted = [f"- {info['description']}: {info['error']}" for info in failed.values()]
        return "\n".join(formatted)

    async def _fallback_synthesis(self, prep_res) -> dict:
        """Template-based fallback synthesis without any LLM call."""
        context = prep_res["aggregated_context"]

        # Simple template-based assembly from the aggregated context.
        response_parts = []

        if context.get("key_discoveries"):
            response_parts.append("Based on my analysis, I found:")
            for discovery in context["key_discoveries"][:3]:  # Top 3
                response_parts.append(f"- {discovery['discovery']}")

        if context.get("successful_results"):
            response_parts.append("\nDetailed results:")
            for _task_id, result in list(context["successful_results"].items())[:2]:  # Top 2
                response_parts.append(f"- {result['task_description']}: {str(result['result'])[:150]}")

        if context.get("adaptation_summary"):
            response_parts.append(f"\n{context['adaptation_summary']}")

        fallback_response = "\n".join(
            response_parts) if response_parts else "I was unable to complete the requested task effectively."

        return {
            "synthesized_response": fallback_response,
            "synthesis_method": "fallback",
            "confidence": 0.3
        }

    def _estimate_synthesis_confidence(self, context: dict) -> float:
        """Estimate synthesis confidence, clamped to [0.1, 1.0]."""
        confidence = 0.5  # Base confidence

        # Boost for successful results (capped at +0.3).
        successful_count = len(context.get("successful_results", {}))
        confidence += min(successful_count * 0.15, 0.3)

        # Boost for high-confidence key discoveries.
        for discovery in context.get("key_discoveries", []):
            discovery_conf = discovery.get("confidence", 0.0)
            confidence += discovery_conf * 0.1

        # Penalty for many failed attempts (capped at -0.2).
        failed_count = len(context.get("failed_attempts", {}))
        confidence -= min(failed_count * 0.1, 0.2)

        return max(0.1, min(1.0, confidence))

    async def post_async(self, shared, prep_res, exec_res):
        """Store the synthesis result and return the routing action."""
        shared["synthesized_response"] = exec_res
        if exec_res.get("synthesized_response"):
            return "synthesized"
        else:
            return "synthesis_failed"
StateSyncNode

Bases: AsyncNode

Synchronize state between world model and shared store

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
@with_progress_tracking
class StateSyncNode(AsyncNode):
    """Synchronize state between world model and shared store"""

    async def prep_async(self, shared):
        """Snapshot the pieces of shared state that synchronization reads."""
        return {
            "world_model": shared.get("world_model", {}),
            "session_data": shared.get("session_data", {}),
            "tasks": shared.get("tasks", {}),
            "system_status": shared.get("system_status", "idle"),
            "sync_timestamp": datetime.now().isoformat(),
        }

    async def exec_async(self, prep_res):
        """Compute state updates; nothing is applied here (see post_async)."""
        sync_result = {
            "world_model_updates": {},
            "session_updates": {},
            "task_updates": {},
            "conflicts_resolved": [],
            "sync_successful": True,
        }

        # Fold learnable facts from the latest response into the world model.
        # NOTE(review): prep_async does not currently emit "current_response",
        # so this branch only fires if that key is added upstream - confirm.
        if "current_response" in prep_res:
            extracted = self._extract_facts(prep_res.get("current_response", ""))
            sync_result["world_model_updates"].update(extracted)

        # Mirror completed task results into the world model.
        for task_id, task in prep_res["tasks"].items():
            if task.status == "completed" and task.result:
                sync_result["world_model_updates"][f"task_{task_id}_result"] = task.result

        return sync_result

    def _extract_facts(self, text: str) -> dict[str, Any]:
        """Extract learnable facts from text"""
        facts = {}
        for raw_line in text.split('\n'):
            line = raw_line.strip()
            # Only definitive "<subject> is <predicate>" statements qualify;
            # skip first-person lines and questions.
            if ' is ' not in line or line.startswith('I ') or '?' in line:
                continue
            parts = line.split(' is ', 1)
            if len(parts) != 2:
                continue
            subject = parts[0].strip().lower()
            if len(subject.split()) <= 3:  # Keep subjects simple
                facts[subject] = parts[1].strip().rstrip('.')
        return facts

    async def post_async(self, shared, prep_res, exec_res):
        """Apply the computed updates to the shared store and report the outcome."""
        if not exec_res["sync_successful"]:
            wprint("State synchronization failed")
            return "sync_failed"
        shared["world_model"].update(exec_res["world_model_updates"])
        shared["session_data"].update(exec_res["session_updates"])
        shared["last_sync"] = datetime.now()
        return "sync_complete"
Task dataclass
Source code in toolboxv2/mods/isaa/base/Agent/types.py
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
@dataclass
class Task:
    """A single unit of work tracked by the agent's planner/executor.

    Instances support both attribute access and dict-style access
    (``task["status"]``) via __getitem__/__setitem__.
    """
    id: str
    type: str
    description: str
    status: str = "pending"  # pending, running, completed, failed, paused
    priority: int = 1  # lower values are scheduled first (executor sorts ascending)
    dependencies: list[str] = field(default_factory=list)  # ids of tasks that must complete first
    subtasks: list[str] = field(default_factory=list)  # ids of child tasks, if any
    result: Any = None  # populated when the task completes
    error: str | None = None  # error message when the task failed
    created_at: datetime = field(default_factory=datetime.now)
    started_at: datetime | None = None  # set when execution begins
    completed_at: datetime | None = None  # set when execution finishes
    metadata: dict[str, Any] = field(default_factory=dict)
    retry_count: int = 0  # retries performed so far
    max_retries: int = 3  # retry budget before the task is considered failed
    critical: bool = False  # presumably escalates/aborts the plan on failure - TODO confirm

    # Marker attribute, presumably used to duck-type-detect Task-like objects - TODO confirm
    task_identification_attr: bool = True


    def __post_init__(self):
        """Ensure all mutable defaults are properly initialized"""
        # Defensive: callers may pass None explicitly, bypassing default_factory.
        if self.metadata is None:
            self.metadata = {}
        if self.dependencies is None:
            self.dependencies = []
        if self.subtasks is None:
            self.subtasks = []

    def __getitem__(self, key):
        # Dict-style read access: task["status"] == task.status
        return getattr(self, key)

    def __setitem__(self, key, value):
        # Dict-style write access: task["status"] = "running"
        setattr(self, key, value)
__post_init__()

Ensure all mutable defaults are properly initialized

Source code in toolboxv2/mods/isaa/base/Agent/types.py
444
445
446
447
448
449
450
451
def __post_init__(self):
    """Ensure all mutable defaults are properly initialized"""
    if self.metadata is None:
        self.metadata = {}
    if self.dependencies is None:
        self.dependencies = []
    if self.subtasks is None:
        self.subtasks = []
TaskExecutorNode

Bases: AsyncNode

Vollständige Task-Ausführung als unabhängige Node mit LLM-unterstützter Planung

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
@with_progress_tracking
class TaskExecutorNode(AsyncNode):
    """Vollständige Task-Ausführung als unabhängige Node mit LLM-unterstützter Planung"""

    def __init__(self, max_parallel: int = 3, **kwargs):
        """Create an executor node.

        Args:
            max_parallel: Upper bound on tasks run concurrently within one group.
            **kwargs: Forwarded unchanged to the AsyncNode base class.
        """
        super().__init__(**kwargs)
        self.max_parallel = max_parallel
        self.results_store = {}  # completed task results, addressable via {{ }} references
        self.execution_history = []  # per-cycle stats, feeds LLM-based plan optimization
        self.agent_instance = None  # set later by the FlowAgent (or pulled from shared in prep)
        self.variable_manager = None
        self.fast_llm_model = None
        self.complex_llm_model = None
        self.progress_tracker = None

    async def prep_async(self, shared):
        """Enhanced preparation with unified variable system.

        Wires up the variable manager scopes, splits the current plan's tasks
        into ready vs. blocked, and builds an (optionally LLM-assisted)
        execution plan. Returns ``{"error": ...}`` when no plan is active,
        otherwise the full execution context consumed by exec_async.
        """
        current_plan = shared.get("current_plan")
        tasks = shared.get("tasks", {})

        # Get unified variable manager (create a fresh one if shared lacks it)
        self.variable_manager = shared.get("variable_manager")
        self.progress_tracker = shared.get("progress_tracker")
        if not self.variable_manager:
            self.variable_manager = VariableManager(shared.get("world_model", {}), shared)

        # Register all necessary scopes
        self.variable_manager.set_results_store(self.results_store)
        self.variable_manager.set_tasks_store(tasks)
        self.variable_manager.register_scope('user', shared.get('user_context', {}))
        self.variable_manager.register_scope('system', {
            'timestamp': datetime.now().isoformat(),
            'agent_name': shared.get('agent_instance', {}).amd.name if shared.get('agent_instance') else 'unknown'
        })

        # Make sure the agent reference is available
        if not self.agent_instance:
            self.agent_instance = shared.get("agent_instance")

        if not current_plan:
            return {"error": "No active plan", "tasks": tasks}

        # Rest of existing prep_async logic...
        ready_tasks = self._find_ready_tasks(current_plan, tasks)
        blocked_tasks = self._find_blocked_tasks(current_plan, tasks)

        execution_plan = await self._create_intelligent_execution_plan(
            ready_tasks, blocked_tasks, current_plan, shared
        )
        self.complex_llm_model = shared.get("complex_llm_model")
        self.fast_llm_model = shared.get("fast_llm_model")

        return {
            "plan": current_plan,
            "ready_tasks": ready_tasks,
            "blocked_tasks": blocked_tasks,
            "all_tasks": tasks,
            "execution_plan": execution_plan,
            "fast_llm_model": self.fast_llm_model,
            "complex_llm_model": self.complex_llm_model,
            "available_tools": shared.get("available_tools", []),
            "world_model": shared.get("world_model", {}),
            "results": self.results_store,
            "variable_manager": self.variable_manager,
            "progress_tracker": self.progress_tracker ,
        }

    def _find_ready_tasks(self, plan: TaskPlan, all_tasks: dict[str, Task]) -> list[Task]:
        """Return pending plan tasks whose dependencies are all satisfied."""
        return [
            task
            for task in plan.tasks
            if task.status == "pending" and self._dependencies_satisfied(task, all_tasks)
        ]

    def _find_blocked_tasks(self, plan: TaskPlan, all_tasks: dict[str, Task]) -> list[Task]:
        """Return pending plan tasks still waiting on unmet dependencies."""
        return [
            task
            for task in plan.tasks
            if task.status == "pending" and not self._dependencies_satisfied(task, all_tasks)
        ]

    def _dependencies_satisfied(self, task: Task, all_tasks: dict[str, Task]) -> bool:
        """Check whether every dependency of *task* exists and has completed."""
        for dep_id in task.dependencies:
            dep_task = all_tasks.get(dep_id)
            if dep_task is None:
                # A dependency that is not tracked at all can never complete.
                wprint(f"Task {task.id} has missing dependency: {dep_id}")
                return False
            if dep_task.status != "completed":
                return False
        return True

    async def _create_intelligent_execution_plan(
        self,
        ready_tasks: list[Task],
        blocked_tasks: list[Task],
        plan: TaskPlan,
        shared: dict
    ) -> dict[str, Any]:
        """Pick a planning strategy: wait, cheap heuristic, or LLM planning."""

        # Nothing runnable yet - report a waiting strategy with diagnostics.
        if not ready_tasks:
            return {
                "strategy": "waiting",
                "reason": "No ready tasks",
                "blocked_count": len(blocked_tasks),
                "recommendations": []
            }

        # Tiny workloads without litellm get the cheap heuristic planner.
        if len(ready_tasks) <= 2 and not LITELLM_AVAILABLE:
            return self._create_simple_execution_plan(ready_tasks, plan)

        # Otherwise delegate to LLM-based intelligent planning.
        return await self._llm_execution_planning(ready_tasks, blocked_tasks, plan, shared)

    def _create_simple_execution_plan(self, ready_tasks: list[Task], plan: TaskPlan) -> dict[str, Any]:
        """Simple heuristic execution planning without an LLM.

        Sorts tasks by (priority, created_at) and batches consecutive ToolTasks
        into parallel groups of at most ``max_parallel``; any other task type
        starts a new group. Assumes ``ready_tasks`` is non-empty (callers
        guarantee this) - with an empty list ``parallel_groups[0]`` would raise.
        """

        # Priority-based ordering; ties broken by creation time.
        sorted_tasks = sorted(ready_tasks, key=lambda t: (t.priority, t.created_at))

        # Identify groups of tasks that may run in parallel.
        parallel_groups = []
        current_group = []

        for task in sorted_tasks:
            # ToolTasks can often run in parallel
            if isinstance(task, ToolTask) and len(current_group) < self.max_parallel:
                current_group.append(task)
            else:
                # Close the current batch and start a new one with this task.
                if current_group:
                    parallel_groups.append(current_group)
                    current_group = []
                current_group.append(task)

        if current_group:
            parallel_groups.append(current_group)

        # "parallel" as soon as more than one group or more than one task exists.
        strategy = "parallel" if len(parallel_groups) > 1 or len(parallel_groups[0]) > 1 else "sequential"

        return {
            "strategy": strategy,
            "execution_groups": parallel_groups,
            "total_groups": len(parallel_groups),
            "reasoning": "Simple heuristic: priority-based with tool parallelization",
            "estimated_duration": self._estimate_duration(sorted_tasks)
        }

    async def _llm_execution_planning(
        self,
        ready_tasks: list[Task],
        blocked_tasks: list[Task],
        plan: TaskPlan,
        shared: dict
    ) -> dict[str, Any]:
        """Advanced LLM-based execution planning.

        Prompts the complex model for a YAML execution plan, parses and
        validates it, and falls back to the simple heuristic planner on any
        failure (LLM error, unparsable YAML, etc.).
        """

        try:
            # Build a detailed task analysis for the LLM prompt
            task_analysis = self._analyze_tasks_for_llm(ready_tasks, blocked_tasks)
            execution_context = self._build_execution_context(shared)

            prompt = f"""
Du bist ein Experte für Task-Ausführungsplanung. Analysiere die verfügbaren Tasks und erstelle einen optimalen Ausführungsplan.

## Verfügbare Tasks zur Ausführung
{task_analysis['ready_tasks_summary']}

## Blockierte Tasks (zur Information)
{task_analysis['blocked_tasks_summary']}

## Ausführungskontext
- Max parallele Tasks: {self.max_parallel}
- Plan-Strategie: {plan.execution_strategy}
- Verfügbare Tools: {', '.join(shared.get('available_tools', []))}
- Bisherige Ergebnisse: {len(self.results_store)} Tasks abgeschlossen
- Execution History: {len(self.execution_history)} vorherige Zyklen

## Bisherige Performance
{execution_context}

## Aufgabe
Erstelle einen optimierten Ausführungsplan. Berücksichtige:
1. Task-Abhängigkeiten und Prioritäten
2. Parallelisierungsmöglichkeiten
3. Resource-Optimierung (Tools, LLM-Aufrufe)
4. Fehlerwahrscheinlichkeit und Retry-Strategien
5. Dynamische Argument-Auflösung zwischen Tasks

Antworte mit YAML:

```yaml
strategy: "parallel"  # "parallel" | "sequential" | "hybrid"
execution_groups:
  - group_id: 1
    tasks: ["task_1", "task_2"]  # Task IDs
    execution_mode: "parallel"
    priority: "high"
    estimated_duration: 30  # seconds
    risk_level: "low"  # low | medium | high
    dependencies_resolved: true
  - group_id: 2
    tasks: ["task_3"]
    execution_mode: "sequential"
    priority: "medium"
    estimated_duration: 15
    depends_on_groups: [1]
reasoning: "Detailed explanation of the execution strategy"
optimization_suggestions:
  - "Specific optimization 1"
  - "Specific optimization 2"
risk_mitigation:
  - risk: "Tool timeout"
    mitigation: "Use shorter timeout for parallel calls"
  - risk: "Argument resolution failure"
    mitigation: "Validate references before execution"
total_estimated_duration: 45
confidence: 0.85
```"""

            model_to_use = shared.get("complex_llm_model", "openrouter/openai/gpt-4o")

            content = await self.agent_instance.a_run_llm_completion(
                model=model_to_use,
                messages=[{"role": "user", "content": prompt}],
                temperature=0.3,
                max_tokens=2000,
                node_name="TaskExecutorNode", task_id="llm_execution_planning"
            )

            # Prefer a fenced ```yaml block; otherwise parse the whole reply.
            yaml_match = re.search(r"```yaml\s*(.*?)\s*```", content, re.DOTALL)
            yaml_str = yaml_match.group(1) if yaml_match else content.strip()

            execution_plan = yaml.safe_load(yaml_str)

            # Validate and repair the LLM-generated plan before use
            validated_plan = self._validate_execution_plan(execution_plan, ready_tasks)

            rprint(
                f"LLM execution plan created: {validated_plan.get('strategy')} with {len(validated_plan.get('execution_groups', []))} groups")
            return validated_plan

        except Exception as e:
            eprint(f"LLM execution planning failed: {e}")
            return self._create_simple_execution_plan(ready_tasks, plan)

    def _analyze_tasks_for_llm(self, ready_tasks: list[Task], blocked_tasks: list[Task]) -> dict[str, str]:
        """Render ready and blocked tasks as prompt-friendly bullet summaries."""

        ready_summary = []
        for task in ready_tasks:
            segments = [f"- {task.id} ({task.type}): {task.description}"]
            if hasattr(task, 'priority'):
                segments.append(f" [Priority: {task.priority}]")
            if isinstance(task, ToolTask):
                segments.append(f" [Tool: {task.tool_name}]")
                if task.arguments:
                    # Count argument values containing {{ }} placeholder references.
                    dynamic_refs = [
                        value for value in task.arguments.values()
                        if isinstance(value, str) and "{{" in value
                    ]
                    if dynamic_refs:
                        segments.append(f" [Dynamic refs: {len(dynamic_refs)}]")
            ready_summary.append("".join(segments))

        blocked_summary = []
        for task in blocked_tasks:
            deps = ", ".join(task.dependencies) if task.dependencies else "None"
            blocked_summary.append(f"- {task.id}: waiting for [{deps}]")

        return {
            "ready_tasks_summary": "\n".join(ready_summary) or "No ready tasks",
            "blocked_tasks_summary": "\n".join(blocked_summary) or "No blocked tasks"
        }

    def _build_execution_context(self, shared: dict) -> str:
        """Summarize recent execution performance and resource usage for prompts."""
        context_parts = []

        # Aggregate stats over the three most recent execution cycles.
        if self.execution_history:
            recent = self.execution_history[-3:]
            avg_duration = sum(entry.get("duration", 0) for entry in recent) / len(recent)
            success_rate = sum(1 for entry in recent if entry.get("success", False)) / len(recent)
            context_parts.append(f"Recent performance: {avg_duration:.1f}s avg, {success_rate:.1%} success rate")

        # Tally completed work by task type.
        if self.results_store:
            tool_usage = {}
            for task_result in self.results_store.values():
                task_type = task_result.get("metadata", {}).get("task_type", "unknown")
                tool_usage[task_type] = tool_usage.get(task_type, 0) + 1
            context_parts.append(f"Resource usage: {tool_usage}")

        return "\n".join(context_parts) if context_parts else "No previous execution history"

    def _validate_execution_plan(self, plan: dict, ready_tasks: list[Task]) -> dict:
        """Validate and repair an LLM-generated execution plan.

        Unknown task ids are dropped, missing fields receive defaults, and a
        single parallel fallback group is created when nothing valid remains.
        """

        validated = {
            "strategy": plan.get("strategy", "sequential"),
            "execution_groups": [],
            "reasoning": plan.get("reasoning", "LLM-generated plan"),
            "total_estimated_duration": plan.get("total_estimated_duration", 60),
            "confidence": min(1.0, max(0.0, plan.get("confidence", 0.5)))
        }

        available_ids = [task.id for task in ready_tasks]

        for group_data in plan.get("execution_groups", []):
            # Keep only task ids that actually exist among the ready tasks.
            valid_tasks = [tid for tid in group_data.get("tasks", []) if tid in available_ids]
            if not valid_tasks:
                continue
            validated["execution_groups"].append({
                "group_id": group_data.get("group_id", len(validated["execution_groups"]) + 1),
                "tasks": valid_tasks,
                "execution_mode": group_data.get("execution_mode", "sequential"),
                "priority": group_data.get("priority", "medium"),
                "estimated_duration": group_data.get("estimated_duration", 30),
                "risk_level": group_data.get("risk_level", "medium")
            })

        # Fallback: one parallel group over the first max_parallel ready tasks.
        if not validated["execution_groups"]:
            validated["execution_groups"] = [{
                "group_id": 1,
                "tasks": available_ids[:self.max_parallel],
                "execution_mode": "parallel",
                "priority": "high"
            }]

        return validated

    def _estimate_duration(self, tasks: list[Task]) -> int:
        """Schätze Ausführungsdauer in Sekunden"""
        duration = 0
        for task in tasks:
            if isinstance(task, ToolTask):
                duration += 10  # Tool calls meist schneller
            elif isinstance(task, LLMTask):
                duration += 20  # LLM calls brauchen länger
            else:
                duration += 15  # Standard
        return duration

    async def exec_async(self, prep_res):
        """Main execution logic with intelligent strategy routing.

        Dispatches the prepared execution plan to the parallel, sequential
        or hybrid executor, records timing/history for later LLM-based plan
        optimization and returns a summary dict (or a waiting/error status).
        """
        if "error" in prep_res:
            return {"error": prep_res["error"]}

        plan = prep_res["execution_plan"]
        strategy = plan["strategy"]

        # Nothing is ready to run yet - report why and how many are blocked.
        if strategy == "waiting":
            return {
                "status": "waiting",
                "message": plan["reason"],
                "blocked_count": plan.get("blocked_count", 0)
            }

        started_at = datetime.now()

        try:
            if strategy == "parallel":
                results = await self._execute_parallel_plan(plan, prep_res)
            elif strategy == "sequential":
                results = await self._execute_sequential_plan(plan, prep_res)
            else:  # hybrid
                results = await self._execute_hybrid_plan(plan, prep_res)

            elapsed = (datetime.now() - started_at).total_seconds()

            # Record this run so future LLM planning can learn from it.
            self.execution_history.append({
                "timestamp": started_at.isoformat(),
                "strategy": strategy,
                "duration": elapsed,
                "tasks_executed": len(results),
                "success": all(r.get("status") == "completed" for r in results),
                "plan_confidence": plan.get("confidence", 0.5)
            })

            # Keep only the 10 most recent executions.
            if len(self.execution_history) > 10:
                self.execution_history = self.execution_history[-10:]

            completed = len([r for r in results if r.get("status") == "completed"])
            failed = len([r for r in results if r.get("status") == "failed"])

            return {
                "status": "executed",
                "results": results,
                "execution_duration": elapsed,
                "strategy_used": strategy,
                "completed_tasks": completed,
                "failed_tasks": failed
            }

        except Exception as e:
            eprint(f"Execution plan failed: {e}")
            return {
                "status": "execution_failed",
                "error": str(e),
                "results": []
            }

    async def _execute_parallel_plan(self, plan: dict, prep_res: dict) -> list[dict]:
        """Führe Plan mit parallelen Gruppen aus"""
        all_results = []

        for group in plan["execution_groups"]:
            group_tasks = self._get_tasks_by_ids(group["tasks"], prep_res)

            if group.get("execution_mode") == "parallel":
                # Parallele Ausführung innerhalb der Gruppe
                batch_results = await self._execute_parallel_batch(group_tasks)
            else:
                # Sequenzielle Ausführung innerhalb der Gruppe
                batch_results = await self._execute_sequential_batch(group_tasks)

            all_results.extend(batch_results)

            # Prüfe ob kritische Tasks fehlgeschlagen sind
            critical_failures = [
                r for r in batch_results
                if r.get("status") == "failed" and self._is_critical_task(r.get("task_id"), prep_res)
            ]

            if critical_failures:
                eprint(f"Critical task failures in group {group['group_id']}, stopping execution")
                break

        return all_results

    async def _execute_sequential_plan(self, plan: dict, prep_res: dict) -> list[dict]:
        """Führe Plan sequenziell aus"""
        all_results = []

        for group in plan["execution_groups"]:
            group_tasks = self._get_tasks_by_ids(group["tasks"], prep_res)
            batch_results = await self._execute_sequential_batch(group_tasks)
            all_results.extend(batch_results)

            # Stoppe bei kritischen Fehlern
            critical_failures = [
                r for r in batch_results
                if r.get("status") == "failed" and self._is_critical_task(r.get("task_id"), prep_res)
            ]

            if critical_failures:
                break

        return all_results

    async def _execute_hybrid_plan(self, plan: dict, prep_res: dict) -> list[dict]:
        """Hybride Ausführung - Groups parallel, innerhalb je nach Mode"""

        # Führe Gruppen parallel aus (wenn möglich)
        group_tasks_list = []
        for group in plan["execution_groups"]:
            group_tasks = self._get_tasks_by_ids(group["tasks"], prep_res)
            group_tasks_list.append((group, group_tasks))

        # Führe bis zu max_parallel Gruppen parallel aus
        batch_size = min(len(group_tasks_list), self.max_parallel)
        all_results = []

        for i in range(0, len(group_tasks_list), batch_size):
            batch = group_tasks_list[i:i + batch_size]

            # Erstelle Coroutines für jede Gruppe
            group_coroutines = []
            for group, tasks in batch:
                if group.get("execution_mode") == "parallel":
                    coro = self._execute_parallel_batch(tasks)
                else:
                    coro = self._execute_sequential_batch(tasks)
                group_coroutines.append(coro)

            # Führe Gruppen-Batch parallel aus
            batch_results = await asyncio.gather(*group_coroutines, return_exceptions=True)

            # Flache Liste der Ergebnisse
            for result_group in batch_results:
                if isinstance(result_group, Exception):
                    eprint(f"Group execution failed: {result_group}")
                    continue
                all_results.extend(result_group)

        return all_results

    def _get_tasks_by_ids(self, task_ids: list[str], prep_res: dict) -> list[Task]:
        """Hole Task-Objekte basierend auf IDs"""
        all_tasks = prep_res["all_tasks"]
        return [all_tasks[tid] for tid in task_ids if tid in all_tasks]

    def _is_critical_task(self, task_id: str, prep_res: dict) -> bool:
        """Prüfe ob Task kritisch ist"""
        task = prep_res["all_tasks"].get(task_id)
        if not task:
            return False
        return getattr(task, 'critical', False) or task.priority == 1

    async def _execute_parallel_batch(self, tasks: list[Task]) -> list[dict]:
        """Führe Tasks parallel aus"""
        if not tasks:
            return []

        # Limitiere auf max_parallel
        batch_size = min(len(tasks), self.max_parallel)
        batches = [tasks[i:i + batch_size] for i in range(0, len(tasks), batch_size)]

        all_results = []
        for batch in batches:
            batch_results = await asyncio.gather(
                *[self._execute_single_task(task) for task in batch],
                return_exceptions=True
            )

            # Handle exceptions
            processed_results = []
            for i, result in enumerate(batch_results):
                if isinstance(result, Exception):
                    eprint(f"Task {batch[i].id} failed with exception: {result}")
                    processed_results.append({
                        "task_id": batch[i].id,
                        "status": "failed",
                        "error": str(result)
                    })
                else:
                    processed_results.append(result)

            all_results.extend(processed_results)

        return all_results

    async def _execute_sequential_batch(self, tasks: list[Task]) -> list[dict]:
        """Führe Tasks sequenziell aus"""
        results = []

        for task in tasks:
            try:
                result = await self._execute_single_task(task)
                results.append(result)

                # Stoppe bei kritischen Fehlern in sequenzieller Ausführung
                if result.get("status") == "failed" and getattr(task, 'critical', False):
                    eprint(f"Critical task {task.id} failed, stopping sequential execution")
                    break

            except Exception as e:
                eprint(f"Sequential task {task.id} failed: {e}")
                results.append({
                    "task_id": task.id,
                    "status": "failed",
                    "error": str(e)
                })

                if getattr(task, 'critical', False):
                    break

        return results

    async def _execute_single_task(self, task: Task) -> dict:
        """Execute one task end-to-end with progress tracking.

        Dispatches on the concrete task type (ToolTask / LLMTask /
        DecisionTask / generic fallback), stores the outcome via
        ``_store_task_result`` and emits start / complete / error progress
        events when a tracker is configured.

        Returns:
            dict with ``task_id`` and ``status`` plus ``result`` on success,
            or ``error`` and ``retry_count`` on failure. Never raises - all
            exceptions are converted into a failed result dict.
        """
        if self.progress_tracker:
            await self.progress_tracker.emit_event(ProgressEvent(
                event_type="task_start",
                node_name="TaskExecutorNode",
                status=NodeStatus.RUNNING,
                task_id=task.id,
                plan_id=self.variable_manager.get("shared.current_plan.id"),
                metadata={
                    "description": task.description,
                    "type": task.type,
                    "priority": task.priority,
                    "dependencies": task.dependencies
                }
            ))

        task_start = time.perf_counter()
        try:
            task.status = "running"
            task.started_at = datetime.now()

            # Ensure metadata is initialized
            if not hasattr(task, 'metadata') or task.metadata is None:
                task.metadata = {}

            # Route to the matching executor. All LLM-backed paths go
            # through LLMToolNode so there is a single unified LLM code path.
            if isinstance(task, ToolTask):
                # Resolve {{...}} placeholders in tool arguments first.
                resolved_args = self._resolve_task_variables(task.arguments)
                result = await self._execute_tool_task_with_validation(task, resolved_args)
            elif isinstance(task, LLMTask):
                # Use LLMToolNode for LLM tasks instead of direct execution
                result = await self._execute_llm_via_llmtool(task)
            elif isinstance(task, DecisionTask):
                # Enhanced decision task with context awareness
                result = await self._execute_decision_task_enhanced(task)
            else:
                # Use LLMToolNode for generic tasks as well
                result = await self._execute_generic_via_llmtool(task)

            # Store result in unified system
            self._store_task_result(task.id, result, True)

            task.result = result
            task.status = "completed"
            task.completed_at = datetime.now()

            task_duration = time.perf_counter() - task_start

            if self.progress_tracker:
                await self.progress_tracker.emit_event(ProgressEvent(
                    event_type="task_complete",
                    node_name="TaskExecutorNode",
                    task_id=task.id,
                    plan_id=self.variable_manager.get("shared.current_plan.id"),
                    status=NodeStatus.COMPLETED,
                    success=True,
                    duration=task_duration,
                    metadata={
                        "result_type": type(result).__name__,
                        "description": task.description
                    }
                ))

            return {
                "task_id": task.id,
                "status": "completed",
                "result": result
            }

        except Exception as e:
            # Mark the task failed and count the attempt for retry logic.
            task.error = str(e)
            task.status = "failed"
            task.retry_count += 1

            # Store error in unified system
            self._store_task_result(task.id, None, False, str(e))
            task_duration = time.perf_counter() - task_start

            if self.progress_tracker:
                await self.progress_tracker.emit_event(ProgressEvent(
                    event_type="task_error",  # explicit error event type
                    node_name="TaskExecutorNode",
                    task_id=task.id,
                    plan_id=self.variable_manager.get("shared.current_plan.id"),
                    status=NodeStatus.FAILED,
                    success=False,
                    duration=task_duration,
                    error_details={
                        "message": str(e),
                        "type": type(e).__name__
                    },
                    metadata={
                        "retry_count": task.retry_count,
                        "description": task.description
                    }
                ))

            eprint(f"Task {task.id} failed: {e}")
            return {
                "task_id": task.id,
                "status": "failed",
                "error": str(e),
                "retry_count": task.retry_count
            }

    async def _resolve_dynamic_arguments(self, arguments: dict[str, Any]) -> dict[str, Any]:
        """Enhanced dynamic argument resolution with full variable system"""
        resolved = {}

        for key, value in arguments.items():
            if isinstance(value, str):
                # FIXED: Use unified variable manager for all resolution
                resolved_value = self.variable_manager.format_text(value)

                # Log if variables weren't resolved (debugging)
                if "{{" in resolved_value and "}}" in resolved_value:
                    wprint(f"Unresolved variables in argument '{key}': {resolved_value}")

                resolved[key] = resolved_value
            else:
                resolved[key] = value

        return resolved

    async def _execute_tool_task_with_validation(self, task: ToolTask, resolved_args: dict[str, Any]) -> Any:
        """Execute a ToolTask with retries, result validation and tracking.

        Flow: emit a RUNNING tool_call event, run the tool via
        ``_execute_tool_with_retries``, reject results that fail
        ``_validate_tool_result``, then mirror the result into the variable
        manager under ``results.<task_id>.data`` and ``tasks.<task_id>.result``.

        Raises:
            ValueError: if ``tool_name`` or the agent is missing, or the
                tool result fails validation.
            Exception: re-raises any tool execution failure after emitting
                a FAILED tool_call event.
        """

        if not task.tool_name:
            raise ValueError(f"ToolTask {task.id} missing tool_name")

        agent = self.agent_instance
        if not agent:
            raise ValueError("Agent instance not available for tool execution")

        tool_start = time.perf_counter()

        # Track tool call start
        if self.progress_tracker:
            await self.progress_tracker.emit_event(ProgressEvent(
                event_type="tool_call",
                timestamp=time.time(),
                node_name="TaskExecutorNode",
                status=NodeStatus.RUNNING,
                task_id=task.id,
                tool_name=task.tool_name,
                tool_args=resolved_args,
                metadata={
                    "task_type": "ToolTask",
                    "hypothesis": task.hypothesis,
                    "validation_criteria": task.validation_criteria
                }
            ))

        try:
            rprint(f"Executing tool {task.tool_name} with resolved args: {resolved_args}")

            # Execute tool with timeout and retry logic
            result = await self._execute_tool_with_retries(task.tool_name, resolved_args, agent)

            tool_duration = time.perf_counter() - tool_start

            # Validate result before marking as success
            is_valid_result = self._validate_tool_result(result, task)

            if not is_valid_result:
                raise ValueError(f"Tool {task.tool_name} returned invalid result: {type(result).__name__}")

            # Track successful tool call
            if self.progress_tracker:
                await self.progress_tracker.emit_event(ProgressEvent(
                    event_type="tool_call",
                    timestamp=time.time(),
                    node_name="TaskExecutorNode",
                    task_id=task.id,
                    status=NodeStatus.COMPLETED,
                    tool_name=task.tool_name,
                    tool_args=resolved_args,
                    tool_result=result,
                    duration=tool_duration,
                    success=True,
                    metadata={
                        "task_type": "ToolTask",
                        "result_type": type(result).__name__,
                        "result_length": len(str(result)),
                        "validation_passed": is_valid_result
                    }
                ))

            # FIXED: Store in variable manager with correct path structure
            if self.variable_manager:
                self.variable_manager.set(f"results.{task.id}.data", result)
                self.variable_manager.set(f"tasks.{task.id}.result", result)

            return result

        except Exception as e:
            tool_duration = time.perf_counter() - tool_start
            # NOTE(review): debug traceback goes to stdout here; consider
            # routing it through eprint like the other error paths.
            import traceback
            print(traceback.format_exc())

            # Detailed error tracking
            if self.progress_tracker:
                await self.progress_tracker.emit_event(ProgressEvent(
                    event_type="tool_call",
                    timestamp=time.time(),
                    node_name="TaskExecutorNode",
                    task_id=task.id,
                    status=NodeStatus.FAILED,
                    tool_name=task.tool_name,
                    tool_args=resolved_args,
                    duration=tool_duration,
                    success=False,
                    tool_error=str(e),
                    metadata={
                        "task_type": "ToolTask",
                        "error_type": type(e).__name__,
                        "retry_attempted": hasattr(self, '_retry_count')
                    }
                ))

            eprint(f"Tool execution failed for {task.tool_name}: {e}")
            raise
    async def _execute_llm_via_llmtool(self, task: "LLMTask") -> Any:
        """Run an LLM task through LLMToolNode so all LLM calls share one path.

        Builds the shared context the node expects, runs it, and returns
        the node's ``current_response``. Falls back to direct enhanced
        execution if the node fails.
        """
        agent = self.agent_instance
        wants_fast = task.llm_config.get("model_preference", "fast") == "fast"

        # Shared context handed to the LLMToolNode run.
        llm_shared = {
            "current_task_description": task.description,
            "formatted_context": {
                "recent_interaction": f"Executing LLM task: {task.description}",
                "session_summary": "",
                "task_context": f"Task ID: {task.id}, Priority: {task.priority}"
            },
            "variable_manager": self.variable_manager,
            "agent_instance": agent,
            "available_tools": agent.shared.get("available_tools", []) if agent else [],
            "tool_capabilities": agent._tool_capabilities if agent else {},
            "fast_llm_model": self.fast_llm_model,
            "complex_llm_model": self.complex_llm_model,
            "progress_tracker": self.progress_tracker,
            "session_id": getattr(self, 'session_id', 'task_executor'),
            "use_fast_response": wants_fast
        }

        node = LLMToolNode()

        try:
            # The node populates llm_shared in place (current_response,
            # tool_calls_made, llm_tool_conversation, synthesized_response).
            await node.run_async(llm_shared)
            return llm_shared["current_response"]
        except Exception as e:
            eprint(f"LLMToolNode execution failed for task {task.id}: {e}")
            import traceback
            print(traceback.format_exc())
            # Fall back to direct LiteLLM execution.
            return await self._execute_llm_task_enhanced(task)

    async def _execute_llm_task_enhanced(self, task: "LLMTask") -> Any:
        """Enhanced LLM task execution with the unified variable system.

        Resolves the prompt template through the variable manager, calls
        LiteLLM directly, stores the result for downstream tasks and
        optionally parses the output against the task's schema.

        Returns:
            The LLM response text (message content), not the raw response
            object.

        Raises:
            Exception: when LiteLLM is unavailable or the completion fails
                (a FAILED llm_call event is emitted before re-raising).
        """
        if not LITELLM_AVAILABLE:
            raise Exception("LiteLLM not available for LLM tasks")

        # Pick the model based on the task's preference ("fast" vs "complex").
        llm_config = task.llm_config
        model_preference = llm_config.get("model_preference", "fast")

        if model_preference == "complex":
            model_to_use = self.variable_manager.get("system.complex_llm_model", "openrouter/openai/gpt-4o")
        else:
            model_to_use = self.variable_manager.get("system.fast_llm_model", "openrouter/anthropic/claude-3-haiku")

        # Collect only the context values the task declared it needs.
        context_data = {}
        for context_key in task.context_keys:
            value = self.variable_manager.get(context_key)
            if value is not None:
                context_data[context_key] = value

        # Resolve prompt template with full variable system
        final_prompt = self.variable_manager.format_text(
            task.prompt_template,
            context=context_data
        )

        llm_start = time.perf_counter()

        try:
            response = await litellm.acompletion(
                model=model_to_use,
                messages=[{"role": "user", "content": final_prompt}],
                temperature=llm_config.get("temperature", 0.7),
                max_tokens=llm_config.get("max_tokens", 2048)
            )

            # BUGFIX: extract the message text instead of returning the raw
            # ModelResponse object - the schema branch below calls
            # result.strip(), and downstream consumers expect a string.
            result = response.choices[0].message.content

            # Store intermediate result for other tasks
            self.variable_manager.set(f"tasks.{task.id}.result", result)

            # Output schema validation if present
            if task.output_schema:
                stripped = result.strip()

                try:
                    # Try JSON first if it looks like JSON
                    if stripped.startswith('{') or stripped.startswith('['):
                        parsed = json.loads(stripped)
                    else:
                        parsed = yaml.safe_load(stripped)

                    # Ensure metadata is a dict before updating
                    if not isinstance(task.metadata, dict):
                        task.metadata = {}

                    # Save parsed result
                    task.metadata["parsed_output"] = parsed

                except (json.JSONDecodeError, yaml.YAMLError):
                    # Record the failure without logging the raw output.
                    if not isinstance(task.metadata, dict):
                        task.metadata = {}
                    task.metadata["parsed_output_error"] = "Invalid JSON/YAML format"

                except Exception as e:
                    if not isinstance(task.metadata, dict):
                        task.metadata = {}
                    task.metadata["parsed_output_error"] = f"Unexpected error: {str(e)}"

            return result
        except Exception as e:
            llm_duration = time.perf_counter() - llm_start

            if self.progress_tracker:
                await self.progress_tracker.emit_event(ProgressEvent(
                    event_type="llm_call",
                    node_name="TaskExecutorNode",
                    task_id=task.id,
                    status=NodeStatus.FAILED,
                    success=False,
                    duration=llm_duration,
                    llm_model=model_to_use,
                    error_details={
                        "message": str(e),
                        "type": type(e).__name__
                    }
                ))

            raise

    async def _execute_generic_via_llmtool(self, task: "Task") -> Any:
        """Run a generic task by feeding its description to the LLMToolNode.

        Flexible fallback for undefined task types: the task description
        becomes the LLM query, and the node's full reasoning/tool-use loop
        handles execution.
        """
        agent = self.agent_instance
        description = task.description

        # The generic task's description doubles as the primary query.
        llm_shared = {
            "current_task_description": description,
            "current_query": description,
            "formatted_context": {
                "recent_interaction": f"Executing generic task: {description}",
                "session_summary": f"The system needs to complete the following task: {description}",
                "task_context": f"Task ID: {task.id}, Priority: {task.priority}, Type: Generic"
            },
            "variable_manager": self.variable_manager,
            "agent_instance": agent,
            # Generic tasks might require tools, so provide full tool context.
            "available_tools": agent.shared.get("available_tools", []) if agent else [],
            "tool_capabilities": agent._tool_capabilities if agent else {},
            "fast_llm_model": self.fast_llm_model,
            "complex_llm_model": self.complex_llm_model,
            "progress_tracker": self.progress_tracker,
            "session_id": getattr(self, 'session_id', 'task_executor_generic'),
            # Default to a fast model; generic tasks are usually simple.
            "use_fast_response": True
        }

        node = LLMToolNode()

        try:
            # The node mutates llm_shared in place with its results.
            await node.run_async(llm_shared)

            # Prefer the structured synthesized response, then the raw one.
            answer = llm_shared.get("synthesized_response", {}).get("synthesized_response")
            if not answer:
                answer = llm_shared.get("current_response", f"Generic task '{task.id}' processed.")

            return answer

        except Exception as e:
            eprint(f"LLMToolNode execution for generic task {task.id} failed: {e}")
            # Propagate so _execute_single_task can handle retries/failures.
            raise

    async def _execute_decision_task_enhanced(self, task: DecisionTask) -> str:
        """Enhanced DecisionTask with intelligent replan assessment"""

        if not LITELLM_AVAILABLE:
            raise Exception("LiteLLM not available for decision tasks")

        # Build comprehensive context for decision
        decision_context = self._build_decision_context(task)

        # Enhanced decision prompt with full context
        enhanced_prompt = f"""
You are making a critical routing decision for task execution. Analyze all context carefully.

## Current Situation
{task.decision_prompt}

## Execution Context
{decision_context}

## Available Routing Options
{json.dumps(task.routing_map, indent=2)}

## Decision Guidelines
1. Only trigger "replan_from_here" if there's a genuine failure that cannot be recovered
2. Use "route_to_task" for normal flow continuation
3. Consider the full context, not just immediate results
4. Be conservative with replanning - it's expensive and can cause loops

Based on ALL the context above, what is your decision?
Respond with EXACTLY one of these options: {', '.join(task.routing_map.keys())}

Your decision:"""

        model_to_use = self.fast_llm_model if hasattr(self, 'fast_llm_model') else "openrouter/anthropic/claude-3-haiku"

        try:
            response = await litellm.acompletion(
                model=model_to_use,
                messages=[{"role": "user", "content": enhanced_prompt}],
                temperature=0.1,
                max_tokens=50
            )

            decision = response.choices[0].message.content.strip().lower().split('\n')[0]

            # Find matching key (case-insensitive)
            matched_key = None
            for key in task.routing_map:
                if key.lower() == decision:
                    matched_key = key
                    break

            if not matched_key:
                wprint(f"Decision '{decision}' not in routing map, using first option")
                matched_key = list(task.routing_map.keys())[0] if task.routing_map else "continue"

            routing_instruction = task.routing_map.get(matched_key, matched_key)

            # Enhanced metadata with decision reasoning
            if not hasattr(task, 'metadata'):
                task.metadata = {}

            task.metadata.update({
                "decision_made": matched_key,
                "routing_instruction": routing_instruction,
                "decision_context": decision_context,
                "replan_justified": self._assess_replan_necessity(matched_key, routing_instruction, decision_context)
            })

            # Handle dynamic planning instructions
            if isinstance(routing_instruction, dict) and "action" in routing_instruction:
                action = routing_instruction["action"]

                if action == "replan_from_here":
                    # Add extensive context for replanning
                    task.metadata["replan_context"] = {
                        "new_goal": routing_instruction.get("new_goal", "Continue with alternative approach"),
                        "failure_reason": f"Decision task {task.id} determined: {matched_key}",
                        "original_task": task.id,
                        "context": routing_instruction.get("context", ""),
                        "execution_history": self._get_execution_history_summary(),
                        "failed_approaches": self._identify_failed_approaches(),
                        "success_indicators": self._identify_success_patterns()
                    }

                self.variable_manager.set(f"tasks.{task.id}.result", {
                    "decision": matched_key,
                    "action": action,
                    "instruction": routing_instruction,
                    "confidence": self._calculate_decision_confidence(decision_context)
                })

                return action

            else:
                # Traditional routing
                next_task_id = routing_instruction if isinstance(routing_instruction, str) else str(routing_instruction)

                task.metadata.update({
                    "next_task_id": next_task_id,
                    "routing_action": "route_to_task"
                })

                self.variable_manager.set(f"tasks.{task.id}.result", {
                    "decision": matched_key,
                    "next_task": next_task_id
                })

                return matched_key

        except Exception as e:
            eprint(f"Enhanced decision task failed: {e}")
            raise

    async def post_async(self, shared, prep_res, exec_res):
        """Extended post-processing with dynamic plan adaptation.

        Writes execution results and performance metrics into the shared
        state, propagates per-task status back onto the task objects,
        detects dynamic-planning actions raised by DecisionTasks, and
        returns a routing string for the flow engine:
        "execution_error", "waiting", "needs_dynamic_replan",
        "needs_plan_append", "plan_completed", "continue_execution",
        or "execution_complete".
        """

        # Integrate the results store into the shared state
        shared["results"] = self.results_store

        # FIX: exec_res may be None; calling .get() on None raised AttributeError
        if exec_res is None or "error" in exec_res:
            last_error = exec_res.get("error") if exec_res is not None else "no execution result"
            shared["executor_performance"] = {"status": "error", "last_error": last_error}
            return "execution_error"

        if exec_res["status"] == "waiting":
            shared["executor_status"] = "waiting_for_dependencies"
            return "waiting"

        # Persist performance metrics for this execution round
        performance_data = {
            "execution_duration": exec_res.get("execution_duration", 0),
            "strategy_used": exec_res.get("strategy_used", "unknown"),
            "completed_tasks": exec_res.get("completed_tasks", 0),
            "failed_tasks": exec_res.get("failed_tasks", 0),
            "success_rate": exec_res.get("completed_tasks", 0) / max(len(exec_res.get("results", [])), 1),
            "timestamp": datetime.now().isoformat()
        }
        shared["executor_performance"] = performance_data

        # Check for dynamic planning actions
        planning_action_detected = False

        for result in exec_res.get("results", []):
            task_id = result["task_id"]
            if task_id in shared["tasks"]:
                task = shared["tasks"][task_id]
                task.status = result["status"]

                if result["status"] == "completed":
                    task.result = result["result"]

                    # Check for planning actions from DecisionTasks
                    if hasattr(task, 'metadata') and task.metadata:
                        routing_action = task.metadata.get("routing_action")

                        if routing_action == "replan_from_here":
                            shared["needs_dynamic_replan"] = True
                            shared["replan_context"] = task.metadata.get("replan_context", {})
                            planning_action_detected = True
                            rprint(f"Dynamic replan triggered by task {task_id}")

                        elif routing_action == "append_plan":
                            shared["needs_plan_append"] = True
                            shared["append_context"] = task.metadata.get("append_context", {})
                            planning_action_detected = True
                            rprint(f"Plan append triggered by task {task_id}")

                    # Store verification results if available
                    if result.get("verification"):
                        if not hasattr(task, 'metadata'):
                            task.metadata = {}
                        task.metadata["verification"] = result["verification"]

                elif result["status"] == "failed":
                    task.error = result.get("error", "Unknown error")

        # Planning actions take precedence over regular completion checks
        if planning_action_detected:
            if shared.get("needs_dynamic_replan"):
                return "needs_dynamic_replan"  # Goes to PlanReflectorNode
            elif shared.get("needs_plan_append"):
                return "needs_plan_append"  # Goes to PlanReflectorNode

        # Regular completion checking
        current_plan = shared["current_plan"]
        if current_plan:
            all_finished = all(
                shared["tasks"][task.id].status in ["completed", "failed"]
                for task in current_plan.tasks
            )

            if all_finished:
                current_plan.status = "completed"
                shared["plan_completion_time"] = datetime.now().isoformat()
                rprint(f"Plan {current_plan.id} finished")
                return "plan_completed"
            else:
                ready_tasks = [
                    task for task in current_plan.tasks
                    if shared["tasks"][task.id].status == "pending"
                ]

                if ready_tasks:
                    return "continue_execution"
                else:
                    return "waiting"

        return "execution_complete"

    def get_execution_statistics(self) -> dict[str, Any]:
        """Return detailed statistics aggregated over the execution history."""
        history = self.execution_history
        if not history:
            return {"message": "No execution history available"}

        run_count = len(history)
        # Count how often each strategy was chosen across all runs.
        strategies = {entry["strategy"] for entry in history}
        usage = {
            name: sum(1 for entry in history if entry["strategy"] == name)
            for name in strategies
        }

        return {
            "total_executions": run_count,
            "average_duration": sum(entry["duration"] for entry in history) / run_count,
            "success_rate": sum(1 for entry in history if entry["success"]) / run_count,
            "strategy_usage": usage,
            "total_tasks_executed": sum(entry["tasks_executed"] for entry in history),
            "average_confidence": sum(entry["plan_confidence"] for entry in history) / run_count,
            "recent_performance": history[-3:] if run_count >= 3 else history,
        }

    def _resolve_task_variables(self, data):
        """Unified variable resolution for any task data"""
        if isinstance(data, str):
            res = self.variable_manager.format_text(data)
            return res
        elif isinstance(data, dict):
            resolved = {}
            for key, value in data.items():
                resolved[key] = self._resolve_task_variables(value)
            return resolved
        elif isinstance(data, list):
            return [self._resolve_task_variables(item) for item in data]
        else:
            return data

    def _store_task_result(self, task_id: str, result: Any, success: bool, error: str = None):
        """Store task result in unified variable system"""
        result_data = {
            "data": result,
            "metadata": {
                "task_type": "task",
                "completed_at": datetime.now().isoformat(),
                "success": success
            }
        }

        if error:
            result_data["error"] = error
            result_data["metadata"]["success"] = False

        # Store in results_store and update variable manager
        self.results_store[task_id] = result_data
        self.variable_manager.set_results_store(self.results_store)

        # FIXED: Store actual result data, not the wrapper object
        self.variable_manager.set(f"results.{task_id}.data", result)
        self.variable_manager.set(f"results.{task_id}.metadata", result_data["metadata"])
        if error:
            self.variable_manager.set(f"results.{task_id}.error", error)

    def _build_decision_context(self, task: DecisionTask) -> str:
        """Assemble a multi-section context string for decision making.

        Combines recent task results, available variable names, an
        execution-history digest, and world-model facts into one
        double-newline-separated block.
        """
        sections = []

        # Up to the three most recent task results, one line each.
        summaries = []
        for tid, entry in list(self.results_store.items())[-3:]:
            ok = entry.get("metadata", {}).get("success", False)
            marker = "✓" if ok else "✗"
            preview = str(entry.get("data", ""))[:100] + "..."
            summaries.append(f"{marker} {tid}: {preview}")
        if summaries:
            sections.append("Recent Results:\n" + "\n".join(summaries))

        # Names of the first ten available variables.
        if self.variable_manager:
            names = list(self.variable_manager.get_available_variables().keys())[:10]
            sections.append(f"Available Variables: {', '.join(names)}")

        # Condensed execution history, if any.
        history_summary = self._get_execution_history_summary()
        if history_summary:
            sections.append(f"Execution Summary: {history_summary}")

        # Facts currently known to the world model, if any.
        facts = self._get_world_model_insights()
        if facts:
            sections.append(f"Known Facts: {facts}")

        return "\n\n".join(sections)

    def _assess_replan_necessity(self, decision: str, routing_instruction: Any, context: str) -> bool:
        """Assess if replanning is truly necessary"""

        if not isinstance(routing_instruction, dict):
            return False

        action = routing_instruction.get("action", "")
        if action != "replan_from_here":
            return False

        # Check if we have genuine failures
        genuine_failures = "error" in context.lower() or "failed" in context.lower()
        alternative_available = len(self.results_store) > 0  # Have some results to work with

        # Be conservative - only replan if really necessary
        return genuine_failures and not alternative_available

    async def _execute_tool_with_retries(self, tool_name: str, args: dict, agent, max_retries: int = 2) -> Any:
        """Execute tool with retry logic"""

        last_exception = None

        for attempt in range(max_retries + 1):
            try:
                result = await agent.arun_function(tool_name, **args)

                # Additional validation - check if result indicates success
                if self._is_tool_result_success(result):
                    return result
                elif attempt < max_retries:
                    wprint(f"Tool {tool_name} returned unclear result, retrying...")
                    continue
                else:
                    return result

            except Exception as e:
                last_exception = e
                if attempt < max_retries:
                    wprint(f"Tool {tool_name} failed (attempt {attempt + 1}), retrying: {e}")
                    await asyncio.sleep(0.5 * (attempt + 1))  # Progressive delay
                else:
                    eprint(f"Tool {tool_name} failed after {max_retries + 1} attempts")

        if last_exception:
            raise last_exception
        else:
            raise RuntimeError(f"Tool {tool_name} failed without exception")

    def _validate_tool_result(self, result: Any, task: ToolTask) -> bool:
        """Sanity-check a tool result so transient noise isn't marked as failure."""
        # None is never a valid result.
        if result is None:
            return False

        if isinstance(result, str):
            lowered = result.lower()
            # Short strings that mention an error are treated as real failures;
            # longer ones may still carry useful content despite the wording.
            mentions_error = any(
                token in lowered
                for token in ("error", "failed", "exception", "timeout", "not found")
            )
            substantial = len(result.strip()) > 20
            if mentions_error and not substantial:
                return False

        # Soft check against the declared expectation (warn only, never reject).
        if hasattr(task, 'expectation') and task.expectation:
            text = str(result).lower()
            if not any(word in text for word in task.expectation.lower().split()):
                wprint(f"Tool result doesn't match expectation: {task.expectation}")

        return True

    def _is_tool_result_success(self, result: Any) -> bool:
        """Determine if a tool result indicates success"""

        if result is None:
            return False

        if isinstance(result, bool):
            return result

        if isinstance(result, list | dict):
            return len(result) > 0

        if isinstance(result, str):
            # Check for explicit success/failure indicators
            result_lower = result.lower()

            success_indicators = ["success", "completed", "found", "retrieved", "generated"]
            failure_indicators = ["error", "failed", "not found", "timeout", "exception"]

            has_success = any(indicator in result_lower for indicator in success_indicators)
            has_failure = any(indicator in result_lower for indicator in failure_indicators)

            if has_success and not has_failure:
                return True
            elif has_failure and not has_success:
                return False
            else:
                # Ambiguous - assume success if there's substantial content
                return len(result.strip()) > 10

        # For other types, assume success if not None
        return True

    def _get_execution_history_summary(self) -> str:
        """Get concise execution history summary"""

        if not hasattr(self, 'execution_history') or not self.execution_history:
            return "No execution history"

        recent = self.execution_history[-3:]  # Last 3 executions
        summaries = []

        for hist in recent:
            status = "Success" if hist.get("success", False) else "Failed"
            duration = hist.get("duration", 0)
            strategy = hist.get("strategy", "Unknown")
            summaries.append(f"{strategy}: {status} ({duration:.1f}s)")

        return "; ".join(summaries)

    def _identify_failed_approaches(self) -> list[str]:
        """Identify approaches that have consistently failed"""

        failed_approaches = []

        # Analyze failed tasks
        for _task_id, result_data in self.results_store.items():
            if not result_data.get("metadata", {}).get("success", True):
                error = result_data.get("error", "")
                if "tool" in error.lower():
                    failed_approaches.append("direct_tool_approach")
                elif "search" in error.lower():
                    failed_approaches.append("search_based_approach")
                elif "llm" in error.lower():
                    failed_approaches.append("llm_direct_approach")

        return list(set(failed_approaches))

    def _identify_success_patterns(self) -> list[str]:
        """Identify patterns that have led to success"""

        success_patterns = []

        # Analyze successful tasks
        successful_results = [
            r for r in self.results_store.values()
            if r.get("metadata", {}).get("success", False)
        ]

        if successful_results:
            # Identify common patterns
            if len(successful_results) > 1:
                success_patterns.append("multi_step_approach")

            for result in successful_results:
                data = result.get("data", "")
                if isinstance(data, str) and len(data) > 100:
                    success_patterns.append("detailed_information_retrieval")

        return list(set(success_patterns))

    def _get_world_model_insights(self) -> str:
        """Get relevant insights from world model"""

        if not self.variable_manager:
            return ""

        world_data = self.variable_manager.scopes.get("world", {})
        if not world_data:
            return "No world model data"

        # Get most recent or relevant facts
        recent_facts = []
        for key, value in list(world_data.items())[:5]:  # Top 5 facts
            recent_facts.append(f"{key}: {str(value)[:50]}...")

        return "; ".join(recent_facts)

    def _calculate_decision_confidence(self, context: str) -> float:
        """Calculate confidence in decision based on context"""

        # Simple heuristic based on context richness
        base_confidence = 0.5

        # Boost confidence if we have rich context
        if len(context) > 200:
            base_confidence += 0.2

        # Boost if we have recent results
        if "Recent Results:" in context:
            base_confidence += 0.2

        # Reduce if there are many failures
        failure_count = context.lower().count("failed") + context.lower().count("error")
        base_confidence -= min(failure_count * 0.1, 0.3)

        return max(0.1, min(1.0, base_confidence))
exec_async(prep_res) async

Hauptausführungslogik mit intelligentem Routing

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
async def exec_async(self, prep_res):
    """Hauptausführungslogik mit intelligentem Routing"""

    if "error" in prep_res:
        return {"error": prep_res["error"]}

    execution_plan = prep_res["execution_plan"]

    if execution_plan["strategy"] == "waiting":
        return {
            "status": "waiting",
            "message": execution_plan["reason"],
            "blocked_count": execution_plan.get("blocked_count", 0)
        }

    # Starte Ausführung basierend auf Plan
    execution_start = datetime.now()

    try:
        if execution_plan["strategy"] == "parallel":
            results = await self._execute_parallel_plan(execution_plan, prep_res)
        elif execution_plan["strategy"] == "sequential":
            results = await self._execute_sequential_plan(execution_plan, prep_res)
        else:  # hybrid
            results = await self._execute_hybrid_plan(execution_plan, prep_res)

        execution_duration = (datetime.now() - execution_start).total_seconds()

        # Speichere Execution-History für LLM-Optimierung
        self.execution_history.append({
            "timestamp": execution_start.isoformat(),
            "strategy": execution_plan["strategy"],
            "duration": execution_duration,
            "tasks_executed": len(results),
            "success": all(r.get("status") == "completed" for r in results),
            "plan_confidence": execution_plan.get("confidence", 0.5)
        })

        # Behalte nur letzte 10 Executions
        if len(self.execution_history) > 10:
            self.execution_history = self.execution_history[-10:]

        return {
            "status": "executed",
            "results": results,
            "execution_duration": execution_duration,
            "strategy_used": execution_plan["strategy"],
            "completed_tasks": len([r for r in results if r.get("status") == "completed"]),
            "failed_tasks": len([r for r in results if r.get("status") == "failed"])
        }

    except Exception as e:
        eprint(f"Execution plan failed: {e}")
        return {
            "status": "execution_failed",
            "error": str(e),
            "results": []
        }
get_execution_statistics()

Erhalte detaillierte Ausführungsstatistiken

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
def get_execution_statistics(self) -> dict[str, Any]:
    """Erhalte detaillierte Ausführungsstatistiken"""
    if not self.execution_history:
        return {"message": "No execution history available"}

    history = self.execution_history

    return {
        "total_executions": len(history),
        "average_duration": sum(h["duration"] for h in history) / len(history),
        "success_rate": sum(1 for h in history if h["success"]) / len(history),
        "strategy_usage": {
            strategy: sum(1 for h in history if h["strategy"] == strategy)
            for strategy in set(h["strategy"] for h in history)
        },
        "total_tasks_executed": sum(h["tasks_executed"] for h in history),
        "average_confidence": sum(h["plan_confidence"] for h in history) / len(history),
        "recent_performance": history[-3:] if len(history) >= 3 else history
    }
post_async(shared, prep_res, exec_res) async

Erweiterte Post-Processing mit dynamischer Plan-Anpassung

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
async def post_async(self, shared, prep_res, exec_res):
    """Erweiterte Post-Processing mit dynamischer Plan-Anpassung"""

    # Results store in shared state integrieren
    shared["results"] = self.results_store

    if exec_res is None or "error" in exec_res:
        shared["executor_performance"] = {"status": "error", "last_error": exec_res.get("error")}
        return "execution_error"

    if exec_res["status"] == "waiting":
        shared["executor_status"] = "waiting_for_dependencies"
        return "waiting"

    # Performance-Metriken speichern
    performance_data = {
        "execution_duration": exec_res.get("execution_duration", 0),
        "strategy_used": exec_res.get("strategy_used", "unknown"),
        "completed_tasks": exec_res.get("completed_tasks", 0),
        "failed_tasks": exec_res.get("failed_tasks", 0),
        "success_rate": exec_res.get("completed_tasks", 0) / max(len(exec_res.get("results", [])), 1),
        "timestamp": datetime.now().isoformat()
    }
    shared["executor_performance"] = performance_data

    # Check for dynamic planning actions
    planning_action_detected = False

    for result in exec_res.get("results", []):
        task_id = result["task_id"]
        if task_id in shared["tasks"]:
            task = shared["tasks"][task_id]
            task.status = result["status"]

            if result["status"] == "completed":
                task.result = result["result"]

                # Check for planning actions from DecisionTasks
                if hasattr(task, 'metadata') and task.metadata:
                    routing_action = task.metadata.get("routing_action")

                    if routing_action == "replan_from_here":
                        shared["needs_dynamic_replan"] = True
                        shared["replan_context"] = task.metadata.get("replan_context", {})
                        planning_action_detected = True
                        rprint(f"Dynamic replan triggered by task {task_id}")

                    elif routing_action == "append_plan":
                        shared["needs_plan_append"] = True
                        shared["append_context"] = task.metadata.get("append_context", {})
                        planning_action_detected = True
                        rprint(f"Plan append triggered by task {task_id}")

                # Store verification results if available
                if result.get("verification"):
                    if not hasattr(task, 'metadata'):
                        task.metadata = {}
                    task.metadata["verification"] = result["verification"]

            elif result["status"] == "failed":
                task.error = result.get("error", "Unknown error")

    # Return appropriate status based on planning actions
    if planning_action_detected:
        if shared.get("needs_dynamic_replan"):
            return "needs_dynamic_replan"  # Goes to PlanReflectorNode
        elif shared.get("needs_plan_append"):
            return "needs_plan_append"  # Goes to PlanReflectorNode

    # Regular completion checking
    current_plan = shared["current_plan"]
    if current_plan:
        all_finished = all(
            shared["tasks"][task.id].status in ["completed", "failed"]
            for task in current_plan.tasks
        )

        if all_finished:
            current_plan.status = "completed"
            shared["plan_completion_time"] = datetime.now().isoformat()
            rprint(f"Plan {current_plan.id} finished")
            return "plan_completed"
        else:
            ready_tasks = [
                task for task in current_plan.tasks
                if shared["tasks"][task.id].status == "pending"
            ]

            if ready_tasks:
                return "continue_execution"
            else:
                return "waiting"

    return "execution_complete"
prep_async(shared) async

Enhanced preparation with unified variable system

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
async def prep_async(self, shared):
    """Enhanced preparation with unified variable system"""
    current_plan = shared.get("current_plan")
    tasks = shared.get("tasks", {})

    # Get unified variable manager
    self.variable_manager = shared.get("variable_manager")
    self.progress_tracker = shared.get("progress_tracker")
    if not self.variable_manager:
        self.variable_manager = VariableManager(shared.get("world_model", {}), shared)

    # Register all necessary scopes
    self.variable_manager.set_results_store(self.results_store)
    self.variable_manager.set_tasks_store(tasks)
    self.variable_manager.register_scope('user', shared.get('user_context', {}))
    self.variable_manager.register_scope('system', {
        'timestamp': datetime.now().isoformat(),
        'agent_name': shared.get('agent_instance', {}).amd.name if shared.get('agent_instance') else 'unknown'
    })

    # Stelle sicher, dass Agent-Referenz verfügbar ist
    if not self.agent_instance:
        self.agent_instance = shared.get("agent_instance")

    if not current_plan:
        return {"error": "No active plan", "tasks": tasks}

    # Rest of existing prep_async logic...
    ready_tasks = self._find_ready_tasks(current_plan, tasks)
    blocked_tasks = self._find_blocked_tasks(current_plan, tasks)

    execution_plan = await self._create_intelligent_execution_plan(
        ready_tasks, blocked_tasks, current_plan, shared
    )
    self.complex_llm_model = shared.get("complex_llm_model")
    self.fast_llm_model = shared.get("fast_llm_model")

    return {
        "plan": current_plan,
        "ready_tasks": ready_tasks,
        "blocked_tasks": blocked_tasks,
        "all_tasks": tasks,
        "execution_plan": execution_plan,
        "fast_llm_model": self.fast_llm_model,
        "complex_llm_model": self.complex_llm_model,
        "available_tools": shared.get("available_tools", []),
        "world_model": shared.get("world_model", {}),
        "results": self.results_store,
        "variable_manager": self.variable_manager,
        "progress_tracker": self.progress_tracker ,
    }
TaskManagementFlow

Bases: AsyncFlow

Enhanced Task-Management-Flow with LLMReasonerNode as strategic core. The flow now starts with strategic reasoning and delegates to specialized sub-systems.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
@with_progress_tracking
class TaskManagementFlow(AsyncFlow):
    """
    Enhanced Task-Management-Flow with LLMReasonerNode as strategic core.
    The flow now starts with strategic reasoning and delegates to specialized sub-systems.
    """

    def __init__(self, max_parallel_tasks: int = 3):
        # Create the strategic reasoning core (new primary node)
        self.llm_reasoner = LLMReasonerNode()

        # Create specialized sub-system nodes (now supporting nodes)
        self.planner_node = TaskPlannerNode()
        self.executor_node = TaskExecutorNode(max_parallel=max_parallel_tasks)
        self.sync_node = StateSyncNode()
        self.llm_tool_node = LLMToolNode()

        # Store references for the reasoner to access sub-systems
        # These will be injected into shared state during execution

        # === NEW HIERARCHICAL FLOW STRUCTURE ===

        # Primary flow: LLMReasonerNode is the main orchestrator
        # It makes strategic decisions and routes to appropriate sub-systems

        # The reasoner can internally call any of these sub-systems:
        # - LLMToolNode for direct tool usage
        # - TaskPlanner + TaskExecutor for complex project management
        # - Direct response for simple queries

        # Only one main connection: reasoner completes -> response generation
        self.llm_reasoner - "reasoner_complete" >> self.sync_node

        # Fallback connections for error handling
        self.llm_reasoner - "error" >> self.sync_node
        self.llm_reasoner - "timeout" >> self.sync_node

        # The old linear connections are removed - the reasoner now controls the flow internally

        super().__init__(start=self.llm_reasoner)

    async def run_async(self, shared):
        """Enhanced run with sub-system injection"""

        # Inject sub-system references into shared state so reasoner can access them
        shared["llm_tool_node_instance"] = self.llm_tool_node
        shared["task_planner_instance"] = self.planner_node
        shared["task_executor_instance"] = self.executor_node

        # Store tool registry access for the reasoner
        agent_instance = shared.get("agent_instance")
        if agent_instance:
            shared["tool_registry"] = agent_instance._tool_registry
            shared["tool_capabilities"] = agent_instance._tool_capabilities

        # Execute the flow with the reasoner as starting point
        return await super().run_async(shared)
run_async(shared) async

Enhanced run with sub-system injection

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
async def run_async(self, shared):
    """Enhanced run with sub-system injection"""

    # Inject sub-system references into shared state so reasoner can access them
    shared["llm_tool_node_instance"] = self.llm_tool_node
    shared["task_planner_instance"] = self.planner_node
    shared["task_executor_instance"] = self.executor_node

    # Store tool registry access for the reasoner
    agent_instance = shared.get("agent_instance")
    if agent_instance:
        shared["tool_registry"] = agent_instance._tool_registry
        shared["tool_capabilities"] = agent_instance._tool_capabilities

    # Execute the flow with the reasoner as starting point
    return await super().run_async(shared)
TaskPlannerNode

Bases: AsyncNode

Erweiterte Aufgabenplanung mit dynamischen Referenzen und Tool-Integration

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
@with_progress_tracking
class TaskPlannerNode(AsyncNode):
    """Advanced task planning with dynamic references and tool integration.

    Turns a user query -- or a list of strategic goals handed over via
    shared["replan_context"] (e.g. by LLMReasonerNode) -- into a TaskPlan.
    Two planning paths exist: a fast lightweight planner for simple queries
    and an advanced LLM-based decomposition for everything else. Both paths
    parse YAML emitted by the model and fall back to a minimal direct-response
    plan on any failure.
    """

    async def prep_async(self, shared) -> dict:
        """Enhanced preparation with goals-based planning support"""

        # Check if this is a goals-based call from LLMReasonerNode
        replan_context = shared.get("replan_context", {})
        goals_list = replan_context.get("goals", [])

        if goals_list:
            # Goals-based planning (called by LLMReasonerNode)
            return {
                "goals": goals_list,
                "planning_mode": "goals_based",
                "query": shared.get("current_query", ""),
                "reasoning_context": replan_context.get("reasoning_context", ""),
                "triggered_by": replan_context.get("triggered_by", "unknown"),
                "tasks": shared.get("tasks", {}),
                "system_status": shared.get("system_status", "idle"),
                "tool_capabilities": shared.get("tool_capabilities", {}),
                "available_tools_names": shared.get("available_tools", []),
                "strategy": "goals_decomposition",  # New strategy type
                "fast_llm_model": shared.get("fast_llm_model"),
                "complex_llm_model": shared.get("complex_llm_model"),
                "agent_instance": shared.get("agent_instance"),
                "variable_manager": shared.get("variable_manager"),
            }
        else:
            # Legacy planning (original query-based approach)
            return {
                "query": shared.get("current_query", ""),
                "tasks": shared.get("tasks", {}),
                "system_status": shared.get("system_status", "idle"),
                "tool_capabilities": shared.get("tool_capabilities", {}),
                "available_tools_names": shared.get("available_tools", []),
                "strategy": shared.get("selected_strategy", "direct_response"),
                "fast_llm_model": shared.get("fast_llm_model"),
                "complex_llm_model": shared.get("complex_llm_model"),
                "agent_instance": shared.get("agent_instance"),
                "variable_manager": shared.get("variable_manager"),
                "planning_mode": "legacy"
            }

    async def exec_async(self, prep_res):
        """Route to the matching planner for the selected strategy.

        Only the explicit "fast_simple_planning" strategy takes the
        lightweight path; all others (including "goals_decomposition")
        go through the full LLM decomposition.
        """
        if prep_res["strategy"] == "fast_simple_planning":
            return await self._create_simple_plan(prep_res)
        else:
            return await self._advanced_llm_decomposition(prep_res)

    async def post_async(self, shared, prep_res, exec_res) -> str:
        """Post-processing after plan creation.

        Publishes a valid TaskPlan into shared state (plan, tasks,
        metadata), emits a "plan_created" progress event when a tracker
        is present, and returns the routing action: "planned" on
        success, "planning_failed" otherwise.
        """

        if exec_res is None:
            shared["planning_error"] = "Plan creation returned None"
            return "planning_failed"

        if isinstance(exec_res, TaskPlan):

            progress_tracker = shared.get("progress_tracker")
            if progress_tracker:
                await progress_tracker.emit_event(ProgressEvent(
                    event_type="plan_created",
                    node_name="TaskPlannerNode",
                    session_id=shared.get("session_id"),
                    status=NodeStatus.COMPLETED,
                    success=True,
                    plan_id=exec_res.id,
                    metadata={
                        "plan_name": exec_res.name,
                        "task_count": len(exec_res.tasks),
                        "strategy": exec_res.execution_strategy
                    }
                ))
                # NOTE(review): brief pause, presumably to let the event flush
                # before execution continues -- confirm necessity.
                await asyncio.sleep(0.1)

            # Successful plan
            shared["current_plan"] = exec_res

            # Make tasks available in shared state for the executor
            task_dict = {task.id: task for task in exec_res.tasks}
            if "tasks" not in shared:
                shared["tasks"] = task_dict
            else:
                shared["tasks"].update(task_dict)

            # Set plan metadata
            shared["plan_created_at"] = datetime.now().isoformat()
            shared["plan_strategy"] = exec_res.execution_strategy
            shared["total_tasks_planned"] = len(exec_res.tasks)

            rprint(f"Plan created successfully: {exec_res.name} with {len(exec_res.tasks)} tasks")
            return "planned"

        else:
            # Plan creation failed
            shared["planning_error"] = "Invalid plan format returned"
            shared["current_plan"] = None
            eprint("Plan creation failed - invalid format")
            return "planning_failed"

    async def _create_simple_plan(self, prep_res) -> TaskPlan:
        """Fast lightweight planning for direct or simple multi-step queries."""
        taw = self._build_tool_intelligence(prep_res)
        # NOTE(review): looks like leftover debug output -- this prints the
        # entire tool-intelligence text on every call; confirm intentional.
        rprint("You are a FAST "+ taw)
        prompt = f"""
You are a FAST abstract pattern recognizer and task planner.
Identify if the query needs a **single-step LLM answer** or a **simple 2–3 task plan** using available tools.
Output ONLY YAML.

## User Query
{prep_res['query']}

## Available Tools
{taw}

## Pattern Recognition (Internal Only)
- Detect if query is informational, action-based, or tool-eligible.
- Map to minimal plan type: "direct_llm" or "simple_tool_plus_llm".

## YAML Schema
```yaml
plan_name: string
description: string
execution_strategy: "sequential" | "parallel"
tasks:
  - id: string
    type: "LLMTask" | "ToolTask"
    description: string
    priority: int
    dependencies: [list]
Example 1 — Direct LLM
```yaml
plan_name: Direct Response
description: Quick answer from LLM
execution_strategy: sequential
tasks:
  - id: answer
    type: LLMTask
    description: Respond to query
    priority: 1
    dependencies: []
    prompt_template: Respond concisely to: {prep_res['query']}
    llm_config:
      model_preference: fast
      temperature: 0.3
```
Example 2 — Tool + LLM
```yaml
plan_name: Fetch and Answer
description: Get info from tool and summarize
execution_strategy: sequential
tasks:
  - id: fetch_info
    type: ToolTask
    description: Get required data
    priority: 1
    dependencies: []
    tool_name: info_api
    arguments:
      query: "{prep_res['query']}"
  - id: summarize
    type: LLMTask
    description: Summarize fetched data
    priority: 2
    dependencies: ["fetch_info"]
    prompt_template: Summarize: {{ results.fetch_info.data }}
    llm_config:
      model_preference: fast
      temperature: 0.3
```
Output Requirements
Use ONLY YAML for the final output
Pick minimal plan type for fastest completion!
focus on correct quotation and correct yaml format!
    """

        try:
            agent_instance = prep_res["agent_instance"]
            # NOTE(review): the "fast" path reads complex_llm_model, not
            # fast_llm_model -- confirm this is intentional.
            content = await agent_instance.a_run_llm_completion(
                model=prep_res.get("complex_llm_model", "openrouter/anthropic/claude-3-haiku"),
                messages=[{"role": "user", "content": prompt}],
                temperature=0.3,
                max_tokens=4512,
                node_name="TaskPlannerNode", task_id="fast_simple_planning"
            )

            # Strip the ```yaml fence if present; otherwise parse raw content.
            yaml_content = content.split("```yaml")[1].split("```")[0].strip() if "```yaml" in content else content
            plan_data = yaml.safe_load(yaml_content)
            # print("Simple", json.dumps(plan_data, indent=2))
            # Map each task's "type" string onto its dataclass; an unknown or
            # missing type raises ValueError here, which the except below
            # converts into the fallback plan.
            return TaskPlan(
                id=str(uuid.uuid4()),
                name=plan_data.get("plan_name", "Generated Plan"),
                description=plan_data.get("description", f"Plan for: {prep_res['query']}"),
                tasks=[
                    [LLMTask, ToolTask, DecisionTask, Task][["LLMTask", "ToolTask", "DecisionTask", "Task"].index(t.get("type"))](**t)
                    for t in plan_data.get("tasks", [])
                ],
                execution_strategy=plan_data.get("execution_strategy", "sequential")
            )

        except Exception as e:
            # Any failure (LLM call, YAML parse, task construction) degrades
            # to a single direct-response LLM task.
            eprint(f"Simple plan creation failed: {e}")
            import traceback
            print(traceback.format_exc())
            return TaskPlan(
                id=str(uuid.uuid4()),
                name="Fallback Plan",
                description="Direct response only",
                tasks=[
                    LLMTask(
                        id="fast_simple_planning",
                        type="LLMTask",
                        description="Generate direct response",
                        priority=1,
                        dependencies=[],
                        prompt_template=f"Respond to the query: {prep_res['query']}",
                        llm_config={"model_preference": "fast"}
                    )
                ]
            )

    async def _advanced_llm_decomposition(self, prep_res) -> TaskPlan:
        """Enhanced LLM-based decomposition with goals-based planning support"""

        planning_mode = prep_res.get("planning_mode", "legacy")
        variable_manager = prep_res.get("variable_manager")
        tool_intelligence = self._build_tool_intelligence(prep_res)

        if planning_mode == "goals_based":
            # Goals-based planning from LLMReasonerNode
            goals_list = prep_res.get("goals", [])
            reasoning_context = prep_res.get("reasoning_context", "")

            prompt = f"""
You are an expert task planner specialized in creating execution plans from strategic goals.
Create a comprehensive plan that addresses all goals with proper dependencies and parallelization.

## Strategic Goals from Reasoner
{chr(10).join([f"{i + 1}. {goal}" for i, goal in enumerate(goals_list)])}

## Reasoning Context
{reasoning_context}

## Your Available Tools & Intelligence
{tool_intelligence}

{variable_manager.get_llm_variable_context() if variable_manager else ""}

## Goals-Based Planning Instructions
1. Analyze each goal for dependencies on other goals
2. Identify goals that can be executed in parallel
3. Create tasks that address each goal effectively
4. Use variable references {{ results.task_id.data }} for dependencies
5. Ensure proper sequencing and coordination

## YAML Schema
```yaml
plan_name: string
description: string
execution_strategy: "sequential" | "parallel" | "mixed"
tasks:
  - id: string
    type: "LLMTask" | "ToolTask" | "DecisionTask"
    description: string
    priority: int
    dependencies: [list of task ids]
    # Type-specific fields as needed
Goals Decomposition Strategy

Independent Goals: Create parallel tasks
Sequential Goals: Use dependencies array
Complex Goals: Break into sub-tasks with DecisionTask routing
Data Dependencies: Use variable references between tasks

Example for Multi-Goal Plan
yamlCopyplan_name: "Multi-Goal Strategic Plan"
description: "Execute multiple strategic objectives with proper coordination"
execution_strategy: "mixed"
tasks:
  - id: "goal_1_research"
    type: "ToolTask"
    description: "Research data for Goal 1"
    priority: 1
    dependencies: []
    tool_name: "search_web"
    arguments:
      query: "research topic for goal 1"

  - id: "goal_2_research"
    type: "ToolTask"
    description: "Research data for Goal 2"
    priority: 1
    dependencies: []
    tool_name: "search_web"
    arguments:
      query: "research topic for goal 2"

  - id: "analyze_combined"
    type: "LLMTask"
    description: "Analyze combined research results"
    priority: 2
    dependencies: ["goal_1_research", "goal_2_research"]
    prompt_template: |
      Analyze these research results:
      Goal 1 Data: {{ results.goal_1_research.data }}
      Goal 2 Data: {{ results.goal_2_research.data }}

      Provide comprehensive analysis addressing both goals.
    llm_config:
      model_preference: "complex"
      temperature: 0.3
Generate the execution plan for the strategic goals:
    """

        else:
            # Legacy single-query planning
            base_query = prep_res['query']
            prompt = f"""
You are an expert task planner with dynamic adaptation capabilities.
Create intelligent, adaptive execution plans for the user query.
User Query
{base_query}
Your Available Tools & Intelligence
{tool_intelligence}
{variable_manager.get_llm_variable_context() if variable_manager else ""}
TASK TYPES (Dataclass-Aligned)

LLMTask: Step that uses a language model
ToolTask: Step that calls an available tool
DecisionTask: Step that decides routing between tasks

YAML SCHEMA
yamlCopyplan_name: string
description: string
execution_strategy: "sequential" | "parallel" | "mixed"
tasks:
  - id: string
    type: "LLMTask" | "ToolTask" | "DecisionTask"
    description: string
    priority: int
    dependencies: [list of task ids]
    # Additional fields depending on type
Generate the adaptive execution plan:
            """

        try:
            model_to_use = prep_res.get("complex_llm_model", "openrouter/openai/gpt-4o")
            agent_instance = prep_res["agent_instance"]

            content = await agent_instance.a_run_llm_completion(
                model=model_to_use,
                messages=[{"role": "user", "content": prompt}],
                temperature=0.3,
                #max_tokens=2048,
                node_name="TaskPlannerNode",
                task_id="goals_based_planning" if planning_mode == "goals_based" else "adaptive_planning"
            )

            # Strip the ```yaml fence if present before parsing.
            if "```yaml" in content:
                yaml_content = content.split("```yaml")[1].split("```")[0].strip()
            else:
                yaml_content = content

            plan_data = yaml.safe_load(yaml_content)

            # Create specialized tasks
            tasks = []
            for task_data in plan_data.get("tasks", []):
                task_type = task_data.pop("type", "generic")
                task = create_task(task_type, **task_data)
                tasks.append(task)

            plan = TaskPlan(
                id=str(uuid.uuid4()),
                name=plan_data.get("plan_name", "Generated Plan"),
                description=plan_data.get("description",
                                          "Plan for goals-based execution" if planning_mode == "goals_based" else f"Plan for: {base_query}"),
                tasks=tasks,
                execution_strategy=plan_data.get("execution_strategy", "sequential"),
                metadata={
                    "planning_mode": planning_mode,
                    "goals_count": len(prep_res.get("goals", [])) if planning_mode == "goals_based" else 1
                }
            )

            rprint(f"Created {planning_mode} plan with {len(tasks)} tasks")
            return plan

        except Exception as e:
            # Degrade to the lightweight planner rather than failing outright.
            eprint(f"Advanced planning failed: {e}")
            import traceback

            print(traceback.format_exc())
            return await self._create_simple_plan(prep_res)

    def _build_tool_intelligence(self, prep_res: dict) -> str:
        """Build detailed tool intelligence for planning.

        Renders each capability from agent_instance._tool_capabilities as
        a text section (function, argument schema, query-relevance score,
        matched triggers, use cases) for inclusion in planner prompts.
        Returns a placeholder string when no capabilities are available.
        """

        agent_instance = prep_res.get("agent_instance")
        if not agent_instance or not hasattr(agent_instance, '_tool_capabilities'):
            return "No tool intelligence available."

        capabilities = agent_instance._tool_capabilities
        query = prep_res.get('query', '').lower()

        context_parts = []
        context_parts.append("### Intelligent Tool Analysis:")

        for tool_name, cap in capabilities.items():
            context_parts.append(f"\n{tool_name}:")
            context_parts.append(f"- Function: {cap.get('primary_function', 'Unknown')}")
            context_parts.append(f"- Arguments: {yaml.dump(cap.get('args_schema', 'takes no arguments!'), default_flow_style=False)}")

            # Check relevance to current query
            relevance_score = self._calculate_tool_relevance(query, cap)
            context_parts.append(f"- Query relevance: {relevance_score:.2f}")

            if relevance_score > 0.4:
                context_parts.append("- ⭐ HIGHLY RELEVANT - SHOULD USE THIS TOOL!")

            # Show trigger analysis
            triggers = cap.get('trigger_phrases', [])
            matched_triggers = [t for t in triggers if t.lower() in query]
            if matched_triggers:
                context_parts.append(f"- Matched triggers: {matched_triggers}")

            # Show use cases
            use_cases = cap.get('use_cases', [])[:3]
            context_parts.append(f"- Use cases: {', '.join(use_cases)}")

        return "\n".join(context_parts)

    def _calculate_tool_relevance(self, query: str, capabilities: dict) -> float:
        """Calculate how relevant a tool is to the current query.

        Sums three weak signals, capped at 1.0: word overlap with trigger
        phrases (+0.04 each), substring hits on confidence triggers
        (confidence / 10 each), and word overlap with indirect connections
        (+0.02 each). Weights look hand-tuned; treat the score as a
        heuristic ranking, not a calibrated probability.
        """

        query_words = set(query.lower().split())

        # Check trigger phrases
        trigger_score = 0.0
        triggers = capabilities.get('trigger_phrases', [])
        for trigger in triggers:
            trigger_words = set(trigger.lower().split())
            if trigger_words.intersection(query_words):
                trigger_score += 0.04
        # Check confidence triggers if available
        conf_triggers = capabilities.get('confidence_triggers', {})
        for phrase, confidence in conf_triggers.items():
            if phrase.lower() in query:
                trigger_score += confidence/10
        # Check indirect connections
        indirect = capabilities.get('indirect_connections', [])
        for connection in indirect:
            connection_words = set(connection.lower().split())
            if connection_words.intersection(query_words):
                trigger_score += 0.02
        return min(1.0, trigger_score)
post_async(shared, prep_res, exec_res) async

Post-processing nach Plan-Erstellung

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
async def post_async(self, shared, prep_res, exec_res):
    """Post-processing after plan creation.

    Publishes a valid TaskPlan into shared state and reports the routing
    action: "planned" on success, "planning_failed" otherwise.
    """

    # Guard: the planner produced nothing at all.
    if exec_res is None:
        shared["planning_error"] = "Plan creation returned None"
        return "planning_failed"

    # Guard: the planner returned something other than a TaskPlan.
    if not isinstance(exec_res, TaskPlan):
        shared["planning_error"] = "Invalid plan format returned"
        shared["current_plan"] = None
        eprint("Plan creation failed - invalid format")
        return "planning_failed"

    plan = exec_res
    tracker = shared.get("progress_tracker")
    if tracker:
        # Announce the freshly created plan to any progress listener.
        event = ProgressEvent(
            event_type="plan_created",
            node_name="TaskPlannerNode",
            session_id=shared.get("session_id"),
            status=NodeStatus.COMPLETED,
            success=True,
            plan_id=plan.id,
            metadata={
                "plan_name": plan.name,
                "task_count": len(plan.tasks),
                "strategy": plan.execution_strategy
            }
        )
        await tracker.emit_event(event)
        await asyncio.sleep(0.1)

    # Store the plan and expose its tasks to the executor.
    shared["current_plan"] = plan
    shared.setdefault("tasks", {}).update({task.id: task for task in plan.tasks})

    # Record plan metadata.
    shared["plan_created_at"] = datetime.now().isoformat()
    shared["plan_strategy"] = plan.execution_strategy
    shared["total_tasks_planned"] = len(plan.tasks)

    rprint(f"Plan created successfully: {plan.name} with {len(plan.tasks)} tasks")
    return "planned"
prep_async(shared) async

Enhanced preparation with goals-based planning support

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
async def prep_async(self, shared):
    """Enhanced preparation with goals-based planning support.

    Returns the planning context in one of two modes: "goals_based" when
    goals were handed over via shared["replan_context"] (e.g. from
    LLMReasonerNode), else "legacy" query-based planning.
    """
    replan_ctx = shared.get("replan_context", {})
    goals = replan_ctx.get("goals", [])

    # Fields common to both planning modes.
    prep = {
        "query": shared.get("current_query", ""),
        "tasks": shared.get("tasks", {}),
        "system_status": shared.get("system_status", "idle"),
        "tool_capabilities": shared.get("tool_capabilities", {}),
        "available_tools_names": shared.get("available_tools", []),
        "fast_llm_model": shared.get("fast_llm_model"),
        "complex_llm_model": shared.get("complex_llm_model"),
        "agent_instance": shared.get("agent_instance"),
        "variable_manager": shared.get("variable_manager"),
    }

    if goals:
        # Goals-based planning (called by LLMReasonerNode).
        prep.update(
            goals=goals,
            planning_mode="goals_based",
            reasoning_context=replan_ctx.get("reasoning_context", ""),
            triggered_by=replan_ctx.get("triggered_by", "unknown"),
            strategy="goals_decomposition",
        )
    else:
        # Legacy planning (original query-based approach).
        prep.update(
            strategy=shared.get("selected_strategy", "direct_response"),
            planning_mode="legacy",
        )
    return prep
ToolAnalysis

Bases: BaseModel

Defines the structure for a valid tool analysis.

Source code in toolboxv2/mods/isaa/base/Agent/types.py
782
783
784
785
786
787
788
789
790
791
792
class ToolAnalysis(BaseModel):
    """Defines the structure for a valid tool analysis.

    Every field is required (Field(...)), so an incomplete analysis fails
    pydantic validation. Consumed by the planner's tool-intelligence and
    relevance-scoring logic (trigger_phrases, confidence_triggers,
    indirect_connections feed the relevance score).
    """
    primary_function: str = Field(..., description="The main purpose of the tool.")
    use_cases: list[str] = Field(..., description="Specific use cases for the tool.")
    trigger_phrases: list[str] = Field(..., description="Phrases that should trigger the tool.")
    indirect_connections: list[str] = Field(..., description="Non-obvious connections or applications.")
    complexity_scenarios: list[str] = Field(..., description="Complex scenarios where the tool can be applied.")
    user_intent_categories: list[str] = Field(..., description="Categories of user intent the tool addresses.")
    confidence_triggers: dict[str, float] = Field(..., description="Phrases mapped to confidence scores.")
    tool_complexity: str = Field(..., description="The complexity of the tool, rated as low, medium, or high.")
    args_schema: dict[str, Any] | None = Field(..., description="The schema for the tool's arguments.")
ToolTask dataclass

Bases: Task

Spezialisierter Task für Tool-Aufrufe

Source code in toolboxv2/mods/isaa/base/Agent/types.py
483
484
485
486
487
488
489
490
@dataclass
class ToolTask(Task):
    """Specialized task for tool invocations."""
    tool_name: str = ""  # Name of the tool to call
    arguments: dict[str, Any] = field(default_factory=dict)  # May contain {{ }} references
    hypothesis: str = ""  # What do we expect from this tool?
    validation_criteria: str = ""  # How do we validate the result?
    expectation: str = ""  # What should the result look like?
UnifiedContextManager

Zentrale Orchestrierung aller Context-Quellen für einheitlichen und effizienten Datenzugriff. Vereinigt ChatSession, VariableManager, World Model und Task Results.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
7558
7559
7560
7561
7562
7563
7564
7565
7566
7567
7568
7569
7570
7571
7572
7573
7574
7575
7576
7577
7578
7579
7580
7581
7582
7583
7584
7585
7586
7587
7588
7589
7590
7591
7592
7593
7594
7595
7596
7597
7598
7599
7600
7601
7602
7603
7604
7605
7606
7607
7608
7609
7610
7611
7612
7613
7614
7615
7616
7617
7618
7619
7620
7621
7622
7623
7624
7625
7626
7627
7628
7629
7630
7631
7632
7633
7634
7635
7636
7637
7638
7639
7640
7641
7642
7643
7644
7645
7646
7647
7648
7649
7650
7651
7652
7653
7654
7655
7656
7657
7658
7659
7660
7661
7662
7663
7664
7665
7666
7667
7668
7669
7670
7671
7672
7673
7674
7675
7676
7677
7678
7679
7680
7681
7682
7683
7684
7685
7686
7687
7688
7689
7690
7691
7692
7693
7694
7695
7696
7697
7698
7699
7700
7701
7702
7703
7704
7705
7706
7707
7708
7709
7710
7711
7712
7713
7714
7715
7716
7717
7718
7719
7720
7721
7722
7723
7724
7725
7726
7727
7728
7729
7730
7731
7732
7733
7734
7735
7736
7737
7738
7739
7740
7741
7742
7743
7744
7745
7746
7747
7748
7749
7750
7751
7752
7753
7754
7755
7756
7757
7758
7759
7760
7761
7762
7763
7764
7765
7766
7767
7768
7769
7770
7771
7772
7773
7774
7775
7776
7777
7778
7779
7780
7781
7782
7783
7784
7785
7786
7787
7788
7789
7790
7791
7792
7793
7794
7795
7796
7797
7798
7799
7800
7801
7802
7803
7804
7805
7806
7807
7808
7809
7810
7811
7812
7813
7814
7815
7816
7817
7818
7819
7820
7821
7822
7823
7824
7825
7826
7827
7828
7829
7830
7831
7832
7833
7834
7835
7836
7837
7838
7839
7840
7841
7842
7843
7844
7845
7846
7847
7848
7849
7850
7851
7852
7853
7854
7855
7856
7857
7858
7859
7860
7861
7862
7863
7864
7865
7866
7867
7868
7869
7870
7871
7872
7873
7874
7875
7876
7877
7878
7879
7880
7881
7882
7883
7884
7885
7886
7887
7888
7889
7890
7891
7892
7893
7894
7895
7896
7897
7898
7899
7900
7901
7902
7903
7904
7905
7906
7907
7908
7909
7910
7911
7912
7913
7914
7915
7916
7917
7918
7919
7920
7921
7922
7923
7924
7925
7926
7927
7928
7929
7930
7931
7932
7933
7934
7935
7936
7937
7938
7939
7940
7941
7942
7943
7944
7945
7946
7947
7948
7949
7950
7951
7952
7953
7954
7955
7956
7957
7958
7959
7960
7961
7962
7963
7964
7965
7966
7967
7968
7969
7970
7971
7972
7973
7974
7975
7976
7977
7978
7979
7980
7981
7982
7983
7984
7985
7986
7987
7988
7989
7990
7991
7992
7993
7994
7995
7996
7997
7998
7999
8000
8001
class UnifiedContextManager:
    """
    Zentrale Orchestrierung aller Context-Quellen für einheitlichen und effizienten Datenzugriff.
    Vereinigt ChatSession, VariableManager, World Model und Task Results.
    """

    def __init__(self, agent):
        self.agent = agent
        self.session_managers: dict[str, Any] = {}  # ChatSession objects
        self.variable_manager: VariableManager = None
        self.compression_threshold = 15  # Messages before compression
        self._context_cache: dict[str, tuple[float, Any]] = {}  # (timestamp, data)
        self.cache_ttl = 300  # 5 minutes
        self._memory_instance = None

    async def initialize_session(self, session_id: str, max_history: int = 200):
        """Initialisiere oder lade existierende ChatSession als primäre Context-Quelle"""
        if session_id not in self.session_managers:
            try:
                # Get memory instance
                if not self._memory_instance:
                    from toolboxv2 import get_app
                    self._memory_instance = get_app().get_mod("isaa").get_memory()
                from toolboxv2.mods.isaa.extras.session import ChatSession
                # Create ChatSession as PRIMARY memory source
                session = ChatSession(
                    self._memory_instance,
                    max_length=max_history,
                    space_name=f"ChatSession/{self.agent.amd.name}.{session_id}.unified"
                )
                self.session_managers[session_id] = session

                # Integration mit VariableManager wenn verfügbar
                if self.variable_manager:
                    self.variable_manager.register_scope(f'session_{session_id}', {
                        'chat_session_active': True,
                        'history_length': len(session.history),
                        'last_interaction': None,
                        'session_id': session_id
                    })

                rprint(f"Unified session context initialized for {session_id}")
                return session

            except Exception as e:
                eprint(f"Failed to create ChatSession for {session_id}: {e}")
                # Fallback: Create minimal session manager
                self.session_managers[session_id] = {
                    'history': [],
                    'session_id': session_id,
                    'fallback_mode': True
                }
                return self.session_managers[session_id]

        return self.session_managers[session_id]

    async def add_interaction(self, session_id: str, role: str, content: str, metadata: dict = None) -> None:
        """Einheitlicher Weg um Interaktionen in ChatSession zu speichern"""
        session = await self.initialize_session(session_id)

        message = {
            'role': role,
            'content': content,
            'timestamp': datetime.now().isoformat(),
            'session_id': session_id,
            'metadata': metadata or {}
        }

        # PRIMARY: Store in ChatSession
        if hasattr(session, 'add_message'):
            await session.add_message(message, direct=False)
        elif isinstance(session, dict) and 'history' in session:
            # Fallback mode
            session['history'].append(message)
            # Keep max length
            max_len = 200
            if len(session['history']) > max_len:
                session['history'] = session['history'][-max_len:]

        # SECONDARY: Update VariableManager
        if self.variable_manager:
            self.variable_manager.set(f'session_{session_id}.last_interaction', message)
            if hasattr(session, 'history'):
                self.variable_manager.set(f'session_{session_id}.history_length', len(session.history))
            elif isinstance(session, dict):
                self.variable_manager.set(f'session_{session_id}.history_length', len(session.get('history', [])))

        # Clear context cache for this session
        self._invalidate_cache(session_id)

    async def get_contextual_history(self, session_id: str, query: str = "", max_entries: int = 10) -> list[dict]:
        """Intelligente Auswahl relevanter Geschichte aus ChatSession"""
        session = self.session_managers.get(session_id)
        if not session:
            return []

        try:
            # ChatSession mode
            if hasattr(session, 'get_past_x'):
                recent_history = session.get_past_x(max_entries * 2, last_u=False)
                # await session.get_reference(query, limit=5)

                return recent_history[:max_entries]

            # Fallback mode
            elif isinstance(session, dict) and 'history' in session:
                history = session['history']
                # Return last max_entries, starting with last user message
                result = []
                for msg in reversed(history[-max_entries * 2:]):
                    result.append(msg)
                    if msg.get('role') == 'user' and len(result) >= max_entries:
                        break
                return list(reversed(result))[:max_entries]

        except Exception as e:
            eprint(f"Error getting contextual history: {e}")

        return []

    async def build_unified_context(self, session_id: str, query: str = None, context_type: str = "full") -> dict[
        str, Any]:
        """ZENTRALE Methode für vollständigen Context-Aufbau aus allen Quellen"""

        # Cache check
        cache_key = f"{session_id}_{hash(query or '')}_{context_type}"
        cached = self._get_cached_context(cache_key)
        if cached:
            return cached

        context = {
            'timestamp': datetime.now().isoformat(),
            'session_id': session_id,
            'query': query,
            'context_type': context_type
        }

        try:
            # 1. CHAT HISTORY (Primary - from ChatSession)
            context['chat_history'] = await self.get_contextual_history(
                session_id, query or "", max_entries=15
            )

            # 2. VARIABLE SYSTEM STATE
            if self.variable_manager:
                context['variables'] = {
                    'available_scopes': list(self.variable_manager.scopes.keys()),
                    'total_variables': len(self.variable_manager.get_available_variables()),
                    'recent_results': self._get_recent_results(5)
                }
            else:
                context['variables'] = {'status': 'variable_manager_not_available'}

            # 3. WORLD MODEL FACTS
            if self.variable_manager:
                world_model = self.variable_manager.get('world', {})
                if world_model and query:
                    context['relevant_facts'] = self._extract_relevant_facts(world_model, query)
                else:
                    context['relevant_facts'] = list(world_model.items())[:5]  # Top 5 facts

            # 4. EXECUTION STATE
            context['execution_state'] = {
                'active_tasks': self._get_active_tasks(),
                'recent_completions': self._get_recent_completions(3),
                'system_status': self.agent.shared.get('system_status', 'idle')
            }

            # 5. SESSION STATISTICS
            context['session_stats'] = {
                'total_sessions': len(self.session_managers),
                'current_session_length': len(context['chat_history']),
                'cache_enabled': bool(self._context_cache)
            }

        except Exception as e:
            eprint(f"Error building unified context: {e}")
            context['error'] = str(e)
            context['fallback_mode'] = True

        # Cache result
        self._cache_context(cache_key, context)
        return context

    def get_formatted_context_for_llm(self, unified_context: dict[str, Any]) -> str:
        """Formatiere unified context für LLM consumption"""
        try:
            parts = []

            # Recent Chat History
            chat_history = unified_context.get('chat_history', [])
            if chat_history:
                parts.append("## Recent Conversation")
                for msg in chat_history[-5:]:  # Last 5 messages
                    timestamp = msg.get('timestamp', '')[:19]  # Remove microseconds
                    role = msg.get('role', 'unknown')
                    content = msg.get('content', '')[:500] + ("..." if len(msg.get('content', '')) > 500 else "")
                    parts.append(f"[{timestamp}] {role}: {content}")

            # System Status
            execution_state = unified_context.get('execution_state', {})
            if execution_state:
                parts.append("\n## Current System State")
                parts.append(f"Status: {execution_state.get('system_status', 'unknown')}")

                active_tasks = execution_state.get('active_tasks', [])
                if active_tasks:
                    parts.append(f"Active Tasks: {len(active_tasks)}")

                recent_completions = execution_state.get('recent_completions', [])
                if recent_completions:
                    parts.append(f"Recent Completions: {len(recent_completions)}")

            # Available Data
            variables = unified_context.get('variables', {})
            if variables and variables.get('recent_results'):
                parts.append("\n## Available Results")
                recent_results = variables['recent_results']
                for result in recent_results[:3]:  # Top 3 results
                    parts.append(f"- {result.get('task_id', 'unknown')}: {str(result.get('preview', ''))[:100]}...")

            # World Model Facts
            relevant_facts = unified_context.get('relevant_facts', [])
            if relevant_facts:
                parts.append("\n## Known Facts")
                for key, value in relevant_facts[:5]:  # Top 5 facts
                    fact_preview = str(value)[:100] + ("..." if len(str(value)) > 100 else "")
                    parts.append(f"- {key}: {fact_preview}")

            parts.append(f"\n---\nContext generated at: {unified_context.get('timestamp', 'unknown')}")

            return "\n".join(parts)

        except Exception as e:
            eprint(f"Error formatting context for LLM: {e}")
            return f"Context formatting error: {str(e)}"

    def _merge_and_dedupe_history(self, recent_history: list[dict], relevant_refs: list) -> list[dict]:
        """Merge und dedupliziere History-Einträge"""
        try:
            merged = recent_history.copy()

            # Add relevant references if they're not already in recent history
            for ref in relevant_refs:
                # Convert ref to message format if needed
                if isinstance(ref, dict) and 'content' in ref:
                    # Check if not already in recent_history
                    is_duplicate = any(
                        msg.get('content', '') == ref.get('content', '') and
                        msg.get('timestamp', '') == ref.get('timestamp', '')
                        for msg in merged
                    )
                    if not is_duplicate:
                        merged.append(ref)

            # Sort by timestamp
            merged.sort(key=lambda x: x.get('timestamp', ''))

            return merged
        except:
            return recent_history

    def _get_recent_results(self, limit: int = 5) -> list[dict]:
        """Hole recent results aus dem shared state"""
        try:
            results_store = self.agent.shared.get("results", {})
            recent_results = []

            for task_id, result_data in list(results_store.items())[-limit:]:
                if result_data and result_data.get("data"):
                    preview = str(result_data["data"])[:150] + "..."
                    recent_results.append({
                        "task_id": task_id,
                        "preview": preview,
                        "success": result_data.get("metadata", {}).get("success", False),
                        "timestamp": result_data.get("metadata", {}).get("completed_at")
                    })

            return recent_results
        except:
            return []

    def _extract_relevant_facts(self, world_model: dict, query: str) -> list[tuple[str, Any]]:
        """Extrahiere relevante Facts basierend auf Query"""
        try:
            query_words = set(query.lower().split())
            relevant_facts = []

            for key, value in world_model.items():
                # Simple relevance scoring
                key_words = set(key.lower().split())
                value_words = set(str(value).lower().split())

                # Check for word overlap
                key_overlap = len(query_words.intersection(key_words))
                value_overlap = len(query_words.intersection(value_words))

                if key_overlap > 0 or value_overlap > 0:
                    relevance_score = key_overlap * 2 + value_overlap  # Key matches weighted higher
                    relevant_facts.append((relevance_score, key, value))

            # Sort by relevance and return top facts
            relevant_facts.sort(key=lambda x: x[0], reverse=True)
            return [(key, value) for _, key, value in relevant_facts[:5]]
        except:
            return list(world_model.items())[:5]

    def _get_active_tasks(self) -> list[dict]:
        """Hole aktive Tasks"""
        try:
            tasks = self.agent.shared.get("tasks", {})
            return [
                {"id": task_id, "description": task.description, "status": task.status}
                for task_id, task in tasks.items()
                if task.status == "running"
            ]
        except:
            return []

    def _get_recent_completions(self, limit: int = 3) -> list[dict]:
        """Hole recent completions"""
        try:
            tasks = self.agent.shared.get("tasks", {})
            completed = [
                {"id": task_id, "description": task.description, "completed_at": task.completed_at}
                for task_id, task in tasks.items()
                if task.status == "completed" and hasattr(task, 'completed_at') and task.completed_at
            ]
            # Sort by completion time
            completed.sort(key=lambda x: x.get('completed_at', ''), reverse=True)
            return completed[:limit]
        except:
            return []

    def _get_cached_context(self, cache_key: str) -> dict[str, Any] | None:
        """Hole Context aus Cache wenn noch gültig"""
        if cache_key in self._context_cache:
            timestamp, data = self._context_cache[cache_key]
            if time.time() - timestamp < self.cache_ttl:
                return data
            else:
                del self._context_cache[cache_key]
        return None

    def _cache_context(self, cache_key: str, context: dict[str, Any]):
        """Speichere Context in Cache"""
        self._context_cache[cache_key] = (time.time(), context.copy())

        # Cleanup old cache entries
        if len(self._context_cache) > 50:  # Keep max 50 entries
            oldest_key = min(self._context_cache.keys(),
                             key=lambda k: self._context_cache[k][0])
            del self._context_cache[oldest_key]

    def _invalidate_cache(self, session_id: str = None):
        """Invalidate cache for specific session or all"""
        if session_id:
            # Remove all cache entries for this session
            keys_to_remove = [k for k in self._context_cache if session_id in k]
            for key in keys_to_remove:
                del self._context_cache[key]
        else:
            self._context_cache.clear()

    def get_session_statistics(self) -> dict[str, Any]:
        """Hole Statistiken über alle Sessions"""
        stats = {
            "total_sessions": len(self.session_managers),
            "active_sessions": [],
            "cache_entries": len(self._context_cache),
            "cache_hit_rate": 0.0  # Could be tracked if needed
        }

        for session_id, session in self.session_managers.items():
            session_info = {
                "session_id": session_id,
                "fallback_mode": isinstance(session, dict) and session.get('fallback_mode', False)
            }

            if hasattr(session, 'history'):
                session_info["message_count"] = len(session.history)
            elif isinstance(session, dict) and 'history' in session:
                session_info["message_count"] = len(session['history'])

            stats["active_sessions"].append(session_info)

        return stats

    async def cleanup_old_sessions(self, max_age_hours: int = 168) -> int:
        """Cleanup alte Sessions (default: 1 Woche)"""
        try:
            cutoff_time = datetime.now() - timedelta(hours=max_age_hours)
            removed_count = 0

            sessions_to_remove = []
            for session_id, session in self.session_managers.items():
                should_remove = False

                # Check last activity
                if hasattr(session, 'history') and session.history:
                    last_msg = session.history[-1]
                    last_timestamp = last_msg.get('timestamp')
                    if last_timestamp:
                        try:
                            last_time = datetime.fromisoformat(last_timestamp.replace('Z', '+00:00'))
                            if last_time < cutoff_time:
                                should_remove = True
                        except:
                            pass
                elif isinstance(session, dict) and session.get('history'):
                    last_msg = session['history'][-1]
                    last_timestamp = last_msg.get('timestamp')
                    if last_timestamp:
                        try:
                            last_time = datetime.fromisoformat(last_timestamp.replace('Z', '+00:00'))
                            if last_time < cutoff_time:
                                should_remove = True
                        except:
                            pass

                if should_remove:
                    sessions_to_remove.append(session_id)

            # Remove old sessions
            for session_id in sessions_to_remove:
                session = self.session_managers[session_id]
                if hasattr(session, 'on_exit'):
                    session.on_exit()  # Save ChatSession data
                del self.session_managers[session_id]
                removed_count += 1

                # Remove from variable manager
                if self.variable_manager:
                    scope_name = f'session_{session_id}'
                    if scope_name in self.variable_manager.scopes:
                        del self.variable_manager.scopes[scope_name]

            # Clear related cache entries
            self._invalidate_cache()

            return removed_count
        except Exception as e:
            eprint(f"Error cleaning up old sessions: {e}")
            return 0
add_interaction(session_id, role, content, metadata=None) async

Einheitlicher Weg um Interaktionen in ChatSession zu speichern

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
7614
7615
7616
7617
7618
7619
7620
7621
7622
7623
7624
7625
7626
7627
7628
7629
7630
7631
7632
7633
7634
7635
7636
7637
7638
7639
7640
7641
7642
7643
7644
7645
7646
async def add_interaction(self, session_id: str, role: str, content: str, metadata: dict = None) -> None:
    """Einheitlicher Weg um Interaktionen in ChatSession zu speichern"""
    session = await self.initialize_session(session_id)

    message = {
        'role': role,
        'content': content,
        'timestamp': datetime.now().isoformat(),
        'session_id': session_id,
        'metadata': metadata or {}
    }

    # PRIMARY: Store in ChatSession
    if hasattr(session, 'add_message'):
        await session.add_message(message, direct=False)
    elif isinstance(session, dict) and 'history' in session:
        # Fallback mode
        session['history'].append(message)
        # Keep max length
        max_len = 200
        if len(session['history']) > max_len:
            session['history'] = session['history'][-max_len:]

    # SECONDARY: Update VariableManager
    if self.variable_manager:
        self.variable_manager.set(f'session_{session_id}.last_interaction', message)
        if hasattr(session, 'history'):
            self.variable_manager.set(f'session_{session_id}.history_length', len(session.history))
        elif isinstance(session, dict):
            self.variable_manager.set(f'session_{session_id}.history_length', len(session.get('history', [])))

    # Clear context cache for this session
    self._invalidate_cache(session_id)
build_unified_context(session_id, query=None, context_type='full') async

ZENTRALE Methode für vollständigen Context-Aufbau aus allen Quellen

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
7678
7679
7680
7681
7682
7683
7684
7685
7686
7687
7688
7689
7690
7691
7692
7693
7694
7695
7696
7697
7698
7699
7700
7701
7702
7703
7704
7705
7706
7707
7708
7709
7710
7711
7712
7713
7714
7715
7716
7717
7718
7719
7720
7721
7722
7723
7724
7725
7726
7727
7728
7729
7730
7731
7732
7733
7734
7735
7736
7737
7738
7739
7740
async def build_unified_context(self, session_id: str, query: str = None, context_type: str = "full") -> dict[
    str, Any]:
    """ZENTRALE Methode für vollständigen Context-Aufbau aus allen Quellen"""

    # Cache check
    cache_key = f"{session_id}_{hash(query or '')}_{context_type}"
    cached = self._get_cached_context(cache_key)
    if cached:
        return cached

    context = {
        'timestamp': datetime.now().isoformat(),
        'session_id': session_id,
        'query': query,
        'context_type': context_type
    }

    try:
        # 1. CHAT HISTORY (Primary - from ChatSession)
        context['chat_history'] = await self.get_contextual_history(
            session_id, query or "", max_entries=15
        )

        # 2. VARIABLE SYSTEM STATE
        if self.variable_manager:
            context['variables'] = {
                'available_scopes': list(self.variable_manager.scopes.keys()),
                'total_variables': len(self.variable_manager.get_available_variables()),
                'recent_results': self._get_recent_results(5)
            }
        else:
            context['variables'] = {'status': 'variable_manager_not_available'}

        # 3. WORLD MODEL FACTS
        if self.variable_manager:
            world_model = self.variable_manager.get('world', {})
            if world_model and query:
                context['relevant_facts'] = self._extract_relevant_facts(world_model, query)
            else:
                context['relevant_facts'] = list(world_model.items())[:5]  # Top 5 facts

        # 4. EXECUTION STATE
        context['execution_state'] = {
            'active_tasks': self._get_active_tasks(),
            'recent_completions': self._get_recent_completions(3),
            'system_status': self.agent.shared.get('system_status', 'idle')
        }

        # 5. SESSION STATISTICS
        context['session_stats'] = {
            'total_sessions': len(self.session_managers),
            'current_session_length': len(context['chat_history']),
            'cache_enabled': bool(self._context_cache)
        }

    except Exception as e:
        eprint(f"Error building unified context: {e}")
        context['error'] = str(e)
        context['fallback_mode'] = True

    # Cache result
    self._cache_context(cache_key, context)
    return context
cleanup_old_sessions(max_age_hours=168) async

Cleanup alte Sessions (default: 1 Woche)

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
7946
7947
7948
7949
7950
7951
7952
7953
7954
7955
7956
7957
7958
7959
7960
7961
7962
7963
7964
7965
7966
7967
7968
7969
7970
7971
7972
7973
7974
7975
7976
7977
7978
7979
7980
7981
7982
7983
7984
7985
7986
7987
7988
7989
7990
7991
7992
7993
7994
7995
7996
7997
7998
7999
8000
8001
async def cleanup_old_sessions(self, max_age_hours: int = 168) -> int:
    """Cleanup alte Sessions (default: 1 Woche)"""
    try:
        cutoff_time = datetime.now() - timedelta(hours=max_age_hours)
        removed_count = 0

        sessions_to_remove = []
        for session_id, session in self.session_managers.items():
            should_remove = False

            # Check last activity
            if hasattr(session, 'history') and session.history:
                last_msg = session.history[-1]
                last_timestamp = last_msg.get('timestamp')
                if last_timestamp:
                    try:
                        last_time = datetime.fromisoformat(last_timestamp.replace('Z', '+00:00'))
                        if last_time < cutoff_time:
                            should_remove = True
                    except:
                        pass
            elif isinstance(session, dict) and session.get('history'):
                last_msg = session['history'][-1]
                last_timestamp = last_msg.get('timestamp')
                if last_timestamp:
                    try:
                        last_time = datetime.fromisoformat(last_timestamp.replace('Z', '+00:00'))
                        if last_time < cutoff_time:
                            should_remove = True
                    except:
                        pass

            if should_remove:
                sessions_to_remove.append(session_id)

        # Remove old sessions
        for session_id in sessions_to_remove:
            session = self.session_managers[session_id]
            if hasattr(session, 'on_exit'):
                session.on_exit()  # Save ChatSession data
            del self.session_managers[session_id]
            removed_count += 1

            # Remove from variable manager
            if self.variable_manager:
                scope_name = f'session_{session_id}'
                if scope_name in self.variable_manager.scopes:
                    del self.variable_manager.scopes[scope_name]

        # Clear related cache entries
        self._invalidate_cache()

        return removed_count
    except Exception as e:
        eprint(f"Error cleaning up old sessions: {e}")
        return 0
get_contextual_history(session_id, query='', max_entries=10) async

Intelligente Auswahl relevanter Geschichte aus ChatSession

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
7648
7649
7650
7651
7652
7653
7654
7655
7656
7657
7658
7659
7660
7661
7662
7663
7664
7665
7666
7667
7668
7669
7670
7671
7672
7673
7674
7675
7676
async def get_contextual_history(self, session_id: str, query: str = "", max_entries: int = 10) -> list[dict]:
    """Intelligente Auswahl relevanter Geschichte aus ChatSession"""
    session = self.session_managers.get(session_id)
    if not session:
        return []

    try:
        # ChatSession mode
        if hasattr(session, 'get_past_x'):
            recent_history = session.get_past_x(max_entries * 2, last_u=False)
            # await session.get_reference(query, limit=5)

            return recent_history[:max_entries]

        # Fallback mode
        elif isinstance(session, dict) and 'history' in session:
            history = session['history']
            # Return last max_entries, starting with last user message
            result = []
            for msg in reversed(history[-max_entries * 2:]):
                result.append(msg)
                if msg.get('role') == 'user' and len(result) >= max_entries:
                    break
            return list(reversed(result))[:max_entries]

    except Exception as e:
        eprint(f"Error getting contextual history: {e}")

    return []
get_formatted_context_for_llm(unified_context)

Formatiere unified context für LLM consumption

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
7742
7743
7744
7745
7746
7747
7748
7749
7750
7751
7752
7753
7754
7755
7756
7757
7758
7759
7760
7761
7762
7763
7764
7765
7766
7767
7768
7769
7770
7771
7772
7773
7774
7775
7776
7777
7778
7779
7780
7781
7782
7783
7784
7785
7786
7787
7788
7789
7790
7791
7792
7793
def get_formatted_context_for_llm(self, unified_context: dict[str, Any]) -> str:
    """Formatiere unified context für LLM consumption"""
    try:
        parts = []

        # Recent Chat History
        chat_history = unified_context.get('chat_history', [])
        if chat_history:
            parts.append("## Recent Conversation")
            for msg in chat_history[-5:]:  # Last 5 messages
                timestamp = msg.get('timestamp', '')[:19]  # Remove microseconds
                role = msg.get('role', 'unknown')
                content = msg.get('content', '')[:500] + ("..." if len(msg.get('content', '')) > 500 else "")
                parts.append(f"[{timestamp}] {role}: {content}")

        # System Status
        execution_state = unified_context.get('execution_state', {})
        if execution_state:
            parts.append("\n## Current System State")
            parts.append(f"Status: {execution_state.get('system_status', 'unknown')}")

            active_tasks = execution_state.get('active_tasks', [])
            if active_tasks:
                parts.append(f"Active Tasks: {len(active_tasks)}")

            recent_completions = execution_state.get('recent_completions', [])
            if recent_completions:
                parts.append(f"Recent Completions: {len(recent_completions)}")

        # Available Data
        variables = unified_context.get('variables', {})
        if variables and variables.get('recent_results'):
            parts.append("\n## Available Results")
            recent_results = variables['recent_results']
            for result in recent_results[:3]:  # Top 3 results
                parts.append(f"- {result.get('task_id', 'unknown')}: {str(result.get('preview', ''))[:100]}...")

        # World Model Facts
        relevant_facts = unified_context.get('relevant_facts', [])
        if relevant_facts:
            parts.append("\n## Known Facts")
            for key, value in relevant_facts[:5]:  # Top 5 facts
                fact_preview = str(value)[:100] + ("..." if len(str(value)) > 100 else "")
                parts.append(f"- {key}: {fact_preview}")

        parts.append(f"\n---\nContext generated at: {unified_context.get('timestamp', 'unknown')}")

        return "\n".join(parts)

    except Exception as e:
        eprint(f"Error formatting context for LLM: {e}")
        return f"Context formatting error: {str(e)}"
get_session_statistics()

Hole Statistiken über alle Sessions

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
7922
7923
7924
7925
7926
7927
7928
7929
7930
7931
7932
7933
7934
7935
7936
7937
7938
7939
7940
7941
7942
7943
7944
def get_session_statistics(self) -> dict[str, Any]:
    """Hole Statistiken über alle Sessions"""
    stats = {
        "total_sessions": len(self.session_managers),
        "active_sessions": [],
        "cache_entries": len(self._context_cache),
        "cache_hit_rate": 0.0  # Could be tracked if needed
    }

    for session_id, session in self.session_managers.items():
        session_info = {
            "session_id": session_id,
            "fallback_mode": isinstance(session, dict) and session.get('fallback_mode', False)
        }

        if hasattr(session, 'history'):
            session_info["message_count"] = len(session.history)
        elif isinstance(session, dict) and 'history' in session:
            session_info["message_count"] = len(session['history'])

        stats["active_sessions"].append(session_info)

    return stats
initialize_session(session_id, max_history=200) async

Initialisiere oder lade existierende ChatSession als primäre Context-Quelle

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
7573
7574
7575
7576
7577
7578
7579
7580
7581
7582
7583
7584
7585
7586
7587
7588
7589
7590
7591
7592
7593
7594
7595
7596
7597
7598
7599
7600
7601
7602
7603
7604
7605
7606
7607
7608
7609
7610
7611
7612
async def initialize_session(self, session_id: str, max_history: int = 200):
    """Initialize or load an existing ChatSession as the primary context source.

    Returns the existing session when one is registered for *session_id*;
    otherwise creates a ChatSession (falling back to a minimal dict-based
    session manager if creation fails) and registers it.
    """
    if session_id in self.session_managers:
        return self.session_managers[session_id]

    try:
        # Lazily resolve the shared memory instance once.
        if not self._memory_instance:
            from toolboxv2 import get_app
            self._memory_instance = get_app().get_mod("isaa").get_memory()
        from toolboxv2.mods.isaa.extras.session import ChatSession

        # The ChatSession acts as the PRIMARY memory source.
        session = ChatSession(
            self._memory_instance,
            max_length=max_history,
            space_name=f"ChatSession/{self.agent.amd.name}.{session_id}.unified"
        )
        self.session_managers[session_id] = session

        # Expose session metadata through the VariableManager when available.
        if self.variable_manager:
            self.variable_manager.register_scope(f'session_{session_id}', {
                'chat_session_active': True,
                'history_length': len(session.history),
                'last_interaction': None,
                'session_id': session_id
            })

        rprint(f"Unified session context initialized for {session_id}")
        return session

    except Exception as e:
        eprint(f"Failed to create ChatSession for {session_id}: {e}")
        # Fallback: minimal dict-based session manager.
        fallback = {
            'history': [],
            'session_id': session_id,
            'fallback_mode': True
        }
        self.session_managers[session_id] = fallback
        return fallback
VariableManager

Unified variable management system with advanced features

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
7086
7087
7088
7089
7090
7091
7092
7093
7094
7095
7096
7097
7098
7099
7100
7101
7102
7103
7104
7105
7106
7107
7108
7109
7110
7111
7112
7113
7114
7115
7116
7117
7118
7119
7120
7121
7122
7123
7124
7125
7126
7127
7128
7129
7130
7131
7132
7133
7134
7135
7136
7137
7138
7139
7140
7141
7142
7143
7144
7145
7146
7147
7148
7149
7150
7151
7152
7153
7154
7155
7156
7157
7158
7159
7160
7161
7162
7163
7164
7165
7166
7167
7168
7169
7170
7171
7172
7173
7174
7175
7176
7177
7178
7179
7180
7181
7182
7183
7184
7185
7186
7187
7188
7189
7190
7191
7192
7193
7194
7195
7196
7197
7198
7199
7200
7201
7202
7203
7204
7205
7206
7207
7208
7209
7210
7211
7212
7213
7214
7215
7216
7217
7218
7219
7220
7221
7222
7223
7224
7225
7226
7227
7228
7229
7230
7231
7232
7233
7234
7235
7236
7237
7238
7239
7240
7241
7242
7243
7244
7245
7246
7247
7248
7249
7250
7251
7252
7253
7254
7255
7256
7257
7258
7259
7260
7261
7262
7263
7264
7265
7266
7267
7268
7269
7270
7271
7272
7273
7274
7275
7276
7277
7278
7279
7280
7281
7282
7283
7284
7285
7286
7287
7288
7289
7290
7291
7292
7293
7294
7295
7296
7297
7298
7299
7300
7301
7302
7303
7304
7305
7306
7307
7308
7309
7310
7311
7312
7313
7314
7315
7316
7317
7318
7319
7320
7321
7322
7323
7324
7325
7326
7327
7328
7329
7330
7331
7332
7333
7334
7335
7336
7337
7338
7339
7340
7341
7342
7343
7344
7345
7346
7347
7348
7349
7350
7351
7352
7353
7354
7355
7356
7357
7358
7359
7360
7361
7362
7363
7364
7365
7366
7367
7368
7369
7370
7371
7372
7373
7374
7375
7376
7377
7378
7379
7380
7381
7382
7383
7384
7385
7386
7387
7388
7389
7390
7391
7392
7393
7394
7395
7396
7397
7398
7399
7400
7401
7402
7403
7404
7405
7406
7407
7408
7409
7410
7411
7412
7413
7414
7415
7416
7417
7418
7419
7420
7421
7422
7423
7424
7425
7426
7427
7428
7429
7430
7431
7432
7433
7434
7435
7436
7437
7438
7439
7440
7441
7442
7443
7444
7445
7446
7447
7448
7449
7450
7451
7452
7453
7454
7455
7456
7457
7458
7459
7460
7461
7462
7463
7464
7465
7466
7467
7468
7469
7470
7471
7472
7473
7474
7475
7476
7477
7478
7479
7480
7481
7482
7483
7484
7485
7486
7487
7488
7489
7490
7491
7492
7493
7494
7495
7496
7497
7498
7499
7500
7501
7502
7503
7504
7505
7506
7507
7508
7509
7510
7511
7512
7513
7514
7515
7516
7517
7518
7519
7520
7521
7522
7523
7524
7525
7526
7527
7528
7529
7530
7531
7532
7533
7534
7535
7536
7537
7538
7539
7540
7541
7542
7543
7544
7545
7546
7547
7548
7549
7550
7551
7552
7553
7554
7555
class VariableManager:
    """Unified variable management system with advanced features.

    Maintains named variable *scopes* (dicts such as ``world``, ``shared``,
    ``results``, ``tasks``) and resolves dot-notation paths across them,
    supporting both dictionary keys and integer list indices.  Also provides
    text templating for ``{{ var }}``, ``{var}`` and ``$var`` syntaxes, plus
    helpers that document the available variables for LLM prompts.
    """

    def __init__(self, world_model: dict, shared_state: dict = None):
        # world_model doubles as the 'world' scope and is the default target
        # for single-segment paths in get()/set().
        self.world_model = world_model
        self.shared_state = shared_state or {}
        self.scopes = {
            'world': world_model,
            'shared': self.shared_state,
            'results': {},
            'tasks': {},
            'user': {},
            'system': {}
        }
        # Path -> resolved value memo; invalidated whenever scopes change.
        self._cache = {}

    def register_scope(self, name: str, data: dict):
        """Register a new variable scope"""
        self.scopes[name] = data
        # Cached paths may resolve differently now, so drop all of them.
        self._cache.clear()

    def set_results_store(self, results_store: dict):
        """Set the results store for task result references"""
        self.scopes['results'] = results_store
        self._cache.clear()

    def set_tasks_store(self, tasks_store: dict):
        """Set tasks store for task metadata access"""
        self.scopes['tasks'] = tasks_store
        self._cache.clear()

    def _resolve_path(self, path: str):
        """
        Internal helper to navigate a path that can contain both
        dictionary keys and list indices.

        Raises:
            KeyError: if a scope, key, or list index along the path is
                missing, or the path descends into a scalar value.
        """
        parts = path.split('.')

        # Determine the starting point
        if len(parts) == 1:
            # Simple key in the top-level world_model
            current = self.world_model
        else:
            scope_name = parts[0]
            if scope_name not in self.scopes:
                raise KeyError(f"Scope '{scope_name}' not found")
            current = self.scopes[scope_name]
            parts = parts[1:]  # Continue with the rest of the path

        # Navigate through the parts
        for part in parts:
            if isinstance(current, list):
                try:
                    # It's a list, so the part must be an integer index
                    index = int(part)
                    current = current[index]
                except (ValueError, IndexError):
                    raise KeyError(f"Invalid list index '{part}' in path '{path}'")
            elif isinstance(current, dict):
                try:
                    # It's a dictionary, so the part is a key
                    current = current[part]
                except KeyError:
                    raise KeyError(f"Key '{part}' not found in path '{path}'")
            else:
                # We've hit a non-collection type (int, str, etc.) but the path continues
                raise KeyError(f"Path cannot descend into non-collection type at '{part}' in path '{path}'")

        return current

    def get(self, path: str, default=None, use_cache: bool = True):
        """Get variable with dot notation path support for dicts and lists."""
        if use_cache and path in self._cache:
            return self._cache[path]

        try:
            value = self._resolve_path(path)
            if use_cache:
                self._cache[path] = value
            return value
        except (KeyError, IndexError):
            # A KeyError or IndexError during resolution means the path is invalid
            return default

    def set(self, path: str, value, create_scope: bool = True):
        """Set variable with dot notation path support for dicts and lists.

        Intermediate containers are created on demand: a list when the next
        path segment is numeric, a dict otherwise.
        """
        # Invalidate cache for this path
        # NOTE(review): the full self._cache.clear() at the end makes this
        # targeted delete redundant — confirm whether both are intended.
        if path in self._cache:
            del self._cache[path]

        parts = path.split('.')

        if len(parts) == 1:
            # Simple key in world_model
            self.world_model[path] = value
            return

        scope_name = parts[0]
        if scope_name not in self.scopes:
            if create_scope:
                self.scopes[scope_name] = {}
            else:
                raise KeyError(f"Scope '{scope_name}' not found")

        current = self.scopes[scope_name]

        # Iterate to the second-to-last part to get the container
        for i, part in enumerate(parts[1:-1]):
            next_part = parts[i + 2]  # Look ahead to the next part in the path

            # Determine if the current part is a dictionary key or a list index
            try:
                # Try to treat it as a list index
                key = int(part)
                if not isinstance(current, list):
                    # If current is not a list, we can't use an integer index
                    raise TypeError(f"Attempted to use integer index '{key}' on non-list for path '{path}'")

                # Ensure list is long enough
                while len(current) <= key:
                    current.append(None)  # Pad with None

                # If the next level doesn't exist, create it based on the next part
                if current[key] is None:
                    current[key] = [] if next_part.isdigit() else {}

                current = current[key]

            except ValueError:
                # It's a dictionary key
                key = part
                if not isinstance(current, dict):
                    raise TypeError(f"Attempted to use string key '{key}' on non-dict for path '{path}'")

                if key not in current:
                    # Create the next level: a list if the next part is a number, else a dict
                    current[key] = [] if next_part.isdigit() else {}

                current = current[key]

        # Handle the final part (the actual assignment)
        last_part = parts[-1]

        if isinstance(current, list):
            try:
                key = int(last_part)
                while len(current) <= key:
                    current.append(None)
                current[key] = value
            except ValueError:
                # Non-numeric final segment on a list: append instead.
                current.append(value)
        elif isinstance(current, dict):
            current[last_part] = value
        # Task-like objects (e.g. ToolTask) are converted to a dict so the
        # field can be assigned.
        elif scope_name == 'tasks' and hasattr(current, 'task_identification_attr'):
            # NOTE(review): asdict() requires a dataclass instance — confirm
            # that task objects reaching this branch are dataclasses.
            dict_data = asdict(current)
            dict_data[last_part] = value
            current = dict_data
            # update self.scopes['tasks'] with the updated task
            # NOTE(review): this stores the whole updated dict under
            # last_part rather than `value` — verify that is intended.
            self.scopes['tasks'][parts[1]][last_part] = current
        else:
            raise TypeError(f"Final container is not a list or dictionary for path '{path}' its a {type(current)}")

        self._cache.clear()

    def format_text(self, text: str, context: dict = None) -> str:
        """Enhanced text formatting with multiple syntaxes.

        Applies, in order: ``{{ var }}``, ``{var}``, and ``$var`` substitution.
        An optional *context* dict is temporarily exposed as the ``context``
        scope and removed again afterwards.
        """
        if not text or not isinstance(text, str):
            return str(text) if text is not None else ""

        # Temporary context overlay
        if context:
            original_scopes = self.scopes.copy()
            self.scopes['context'] = context

        try:
            # Handle {{ variable }} syntax
            formatted = self._format_double_braces(text)

            # Handle {variable} syntax
            formatted = self._format_single_braces(formatted)

            # Handle $variable syntax
            formatted = self._format_dollar_syntax(formatted)

            return formatted

        finally:
            # Restore the scope mapping captured before the overlay.
            if context:
                self.scopes = original_scopes

    def _format_double_braces(self, text: str) -> str:
        """Handle {{ variable.path }} syntax with improved debugging"""
        import re

        def replace_var(match):
            var_path = match.group(1).strip()
            value = self.get(var_path)

            if value is None:
                # IMPROVED: Log missing variables for debugging
                available_vars = list(self.get_available_variables().keys())
                wprint(f"Variable '{var_path}' not found. Available: {available_vars[:10]}")
                return match.group(0)  # Keep original if not found

            return self._value_to_string(value)

        return re.sub(r'\{\{\s*([^}]+)\s*\}\}', replace_var, text)

    def _format_single_braces(self, text: str) -> str:
        """Handle {variable.path} syntax, including with spaces like { variable.path }."""
        import re

        def replace_var(match):
            # Extract the variable name and strip surrounding whitespace
            var_path = match.group(1).strip()

            # get() already handles dot-notation resolution
            value = self.get(var_path)

            # Return the converted value, or the original tag if unresolved
            return self._value_to_string(value) if value is not None else match.group(0)

        # This regex matches {any.content} and allows spaces around the content.
        # Nested or empty braces such as {} or { {var} } are excluded.
        return re.sub(r'\{([^{}]+)\}', replace_var, text)

    def _format_dollar_syntax(self, text: str) -> str:
        """Handle $variable syntax"""
        import re

        def replace_var(match):
            var_name = match.group(1)
            value = self.get(var_name)
            return self._value_to_string(value) if value is not None else match.group(0)

        return re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', replace_var, text)

    def _value_to_string(self, value) -> str:
        """Convert value to string representation"""
        if isinstance(value, str):
            return value
        elif isinstance(value, dict | list):
            # Containers are rendered as JSON; non-serializable items fall
            # back to str() via default=str.
            return json.dumps(value, default=str)
        else:
            return str(value)

    def validate_references(self, text: str) -> dict[str, bool]:
        """Validate all variable references in text.

        Returns a mapping from each reference literal (as it appears in the
        text) to whether it currently resolves to a non-None value.
        """
        import re

        references = {}

        # Find all {{ }} references
        double_brace_refs = re.findall(r'\{\{\s*([^}]+)\s*\}\}', text)
        for ref in double_brace_refs:
            references["{{"+ref+"}}"] = self.get(ref.strip()) is not None

        # Find all {} references
        single_brace_refs = re.findall(r'\{([^{}\s]+)\}', text)
        for ref in single_brace_refs:
            if '.' not in ref:  # Only simple vars
                references["{"+ref+"}"] = self.get(ref.strip()) is not None

        # Find all $ references
        dollar_refs = re.findall(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', text)
        for ref in dollar_refs:
            references[f"${ref}"] = self.get(ref) is not None

        return references

    def get_scope_info(self) -> dict[str, Any]:
        """Get information about all available scopes"""
        info = {}
        for scope_name, scope_data in self.scopes.items():
            if isinstance(scope_data, dict):
                info[scope_name] = {
                    'type': 'dict',
                    'keys': len(scope_data),
                    'sample_keys': list(scope_data.keys())[:5]
                }
            else:
                info[scope_name] = {
                    'type': type(scope_data).__name__,
                    'value': str(scope_data)[:100]
                }
        return info

    def _validate_task_references(self, task: Task) -> dict[str, Any]:
        """Validate all variable references in a task.

        Unresolvable references in an LLM prompt are treated as errors;
        unresolvable references in tool arguments only produce warnings.
        """
        validation_results = {
            'valid': True,
            'errors': [],
            'warnings': []
        }

        # Check different task types
        if isinstance(task, LLMTask):
            if task.prompt_template:
                refs = self.validate_references(task.prompt_template)
                for ref, is_valid in refs.items():
                    if not is_valid:
                        validation_results['errors'].append(f"Invalid reference in prompt: {ref}")
                        validation_results['valid'] = False

        elif isinstance(task, ToolTask):
            for key, value in task.arguments.items():
                if isinstance(value, str):
                    refs = self.validate_references(value)
                    for ref, is_valid in refs.items():
                        if not is_valid:
                            validation_results['warnings'].append(f"Invalid reference in {key}: {ref}")

        return validation_results

    def get_variable_suggestions(self, query: str) -> list[str]:
        """Get variable suggestions based on query content"""

        query_lower = query.lower()
        suggestions = []

        # Check all variables for relevance
        # NOTE(review): assumes every scope is a dict — a non-dict scope
        # would break on .items(); confirm register_scope is only called
        # with dicts.
        for scope in self.scopes.values():
            for name, var_def in scope.items():
                # Internal bookkeeping entries are never suggested.
                if name in ["system_context", "task_executor_instance",
                            "index", "tool_capabilities", "use_fast_response", "task_planner_instance"]:
                    continue
                # Name similarity
                if any(word in name.lower() for word in query_lower.split()):
                    suggestions.append(name)
                    continue

                # Description similarity
                if var_def and any(word in str(var_def).lower() for word in query_lower.split()):
                    suggestions.append(name)
                    continue


        # Deduplicate (set drops ordering) and cap at 10 suggestions.
        return list(set(suggestions))[:10]

    def _document_structure(self, data: Any, path_prefix: str, docs: dict[str, dict]):
        """A recursive helper to document nested dictionaries and lists."""
        if isinstance(data, dict):
            for key, value in data.items():
                # Construct the full path for the current item
                current_path = f"{path_prefix}.{key}" if path_prefix else key

                # Generate a preview for the value
                if isinstance(value, str):
                    preview = value[:70] + "..." if len(value) > 70 else value
                elif isinstance(value, dict):
                    preview = f"Object with keys: {list(value.keys())[:3]}" + ("..." if len(value.keys()) > 3 else "")
                elif isinstance(value, list):
                    preview = f"List with {len(value)} items"
                else:
                    preview = str(value)

                # Store the documentation for the current path
                docs[current_path] = {
                    'preview': preview,
                    'type': type(value).__name__
                }

                # Recurse into nested structures
                if isinstance(value, dict | list):
                    self._document_structure(value, current_path, docs)

        elif isinstance(data, list):
            for i, item in enumerate(data):
                # Construct the full path for the list item
                current_path = f"{path_prefix}.{i}"

                # Generate a preview for the item
                if isinstance(item, str):
                    preview = item[:70] + "..." if len(item) > 70 else item
                elif isinstance(item, dict):
                    preview = f"Object with keys: {list(item.keys())[:3]}" + ("..." if len(item.keys()) > 3 else "")
                elif isinstance(item, list):
                    preview = f"List with {len(item)} items"
                else:
                    preview = str(item)

                docs[current_path] = {
                    'preview': preview,
                    'type': type(item).__name__
                }

                # Recurse into nested structures
                if isinstance(item, dict | list):
                    self._document_structure(item, current_path, docs)

    def get_available_variables(self) -> dict[str, dict]:
        """
        Recursively documents all available variables from world_model and scopes
        to provide a comprehensive overview for an LLM.
        """
        all_vars_docs = {}

        # 1. Document the world_model (top-level variables)
        self._document_structure(self.world_model, "", all_vars_docs)

        # 2. Document each scope
        for scope_name, scope_data in self.scopes.items():
            # Add documentation for the scope root itself
            # The shared scope is deliberately excluded from the overview.
            if scope_name == "shared":
                continue
            if isinstance(scope_data, dict):
                preview = f"Object with keys: {list(scope_data.keys())[:3]}" + (
                    "..." if len(scope_data.keys()) > 3 else "")
            elif isinstance(scope_data, list):
                preview = f"List with {len(scope_data)} items"
            elif isinstance(scope_data, str | int):
                preview = str(scope_data)
            else:
                # Unsupported scope types are skipped entirely.
                continue

            all_vars_docs[scope_name] = {'preview': preview, 'type': type(scope_data).__name__}

            # Recurse into the scope's data
            self._document_structure(scope_data, scope_name, all_vars_docs)

        return all_vars_docs

    def get_llm_variable_context(self) -> str:
        """
        Generates a detailed variable context formatted for LLM consumption,
        explaining structure, access patterns, and listing all available variables.
        """
        context_parts = [
            "## Variable System Reference",
            "You can access a state management system to retrieve data using dot notation.",
            "Syntax: `{{ path.to.variable }}` or `$path.to.variable`.",
            "",
            "### How to Access Data",
            "The system contains nested objects (dictionaries) and lists (arrays).",
            "",
            "**1. Object (Dictionary) Access (Primary Usage):**",
            "Use a dot (`.`) to access values inside an object. This is the most common way to get data.",
            "Example: If a `user` object exists with a `profile`, you can get the name with `{{ user.profile.name }}`.",
            "",
            "**2. List (Array) Access:**",
            "If a variable is a list, use a dot (`.`) followed by a zero-based number (index) to access a specific item.",
            "Example: To get the first email from a user's email list, use `{{ user.emails.0 }}`.",
            "You can chain these access methods: `{{ user.emails.0.address }}`.",
            "",
            "### Available Variables",
            "Below is a list of all currently available variable paths, their type, and a preview of their content. (Note: Previews may be truncated).",
        ]

        variables = self.get_available_variables()
        if not variables:
            context_parts.append("- No variables are currently set.")
            return "\n".join(context_parts)

        # Replace any shared-scope documentation with a fixed placeholder.
        if "shared" in variables:
            variables["shared"] = {'preview': "Shared state variables", 'type': "dict"}

        # yaml dump preview
        context_parts.append("```yaml")
        context_parts.append(yaml.dump(variables, default_flow_style=False, sort_keys=False))
        context_parts.append("```")

        # Add any final complex examples or notes
        context_parts.extend([
            "",
            "**Note on Task Results:**",
            "All task results are stored in the `results` scope. To access the data from a task, append `.data`.",
            "Example: `{{ results.'task-id-123'.data }}`"
        ])

        return "\n".join(context_parts)
format_text(text, context=None)

Enhanced text formatting with multiple syntaxes

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
7250
7251
7252
7253
7254
7255
7256
7257
7258
7259
7260
7261
7262
7263
7264
7265
7266
7267
7268
7269
7270
7271
7272
7273
7274
def format_text(self, text: str, context: dict = None) -> str:
    """Apply all supported template syntaxes to *text*.

    Runs the ``{{ var }}``, ``{var}`` and ``$var`` substitution passes in
    sequence.  A truthy *context* is temporarily exposed as the ``context``
    scope and removed again afterwards.  Non-string input is stringified
    (``None`` becomes the empty string).
    """
    if not isinstance(text, str) or not text:
        return "" if text is None else str(text)

    # Snapshot of the scope mapping, restored after an overlay.
    saved_scopes = None
    if context:
        saved_scopes = self.scopes.copy()
        self.scopes['context'] = context

    try:
        result = text
        # Substitution passes, applied in a fixed order.
        for substitute in (self._format_double_braces,
                           self._format_single_braces,
                           self._format_dollar_syntax):
            result = substitute(result)
        return result
    finally:
        if saved_scopes is not None:
            self.scopes = saved_scopes
get(path, default=None, use_cache=True)

Get variable with dot notation path support for dicts and lists.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
7156
7157
7158
7159
7160
7161
7162
7163
7164
7165
7166
7167
7168
def get(self, path: str, default=None, use_cache: bool = True):
    """Resolve a dot-notation *path* across scopes, dicts and lists.

    Returns *default* when the path cannot be resolved.  Successful
    lookups are memoized in the internal cache when *use_cache* is True.
    """
    if use_cache and path in self._cache:
        # Fast path: previously resolved and still cached.
        return self._cache[path]

    try:
        resolved = self._resolve_path(path)
    except (KeyError, IndexError):
        # Any resolution failure means the path is invalid.
        return default

    if use_cache:
        self._cache[path] = resolved
    return resolved
get_available_variables()

Recursively documents all available variables from world_model and scopes to provide a comprehensive overview for an LLM.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
7476
7477
7478
7479
7480
7481
7482
7483
7484
7485
7486
7487
7488
7489
7490
7491
7492
7493
7494
7495
7496
7497
7498
7499
7500
7501
7502
7503
7504
7505
7506
def get_available_variables(self) -> dict[str, dict]:
    """
    Recursively documents all available variables from world_model and scopes
    to provide a comprehensive overview for an LLM.
    """
    all_vars_docs = {}

    # 1. Document the world_model (top-level variables)
    self._document_structure(self.world_model, "", all_vars_docs)

    # 2. Document each scope
    for scope_name, scope_data in self.scopes.items():
        # Add documentation for the scope root itself
        if scope_name == "shared":
            continue
        if isinstance(scope_data, dict):
            preview = f"Object with keys: {list(scope_data.keys())[:3]}" + (
                "..." if len(scope_data.keys()) > 3 else "")
        elif isinstance(scope_data, list):
            preview = f"List with {len(scope_data)} items"
        elif isinstance(scope_data, str | int):
            preview = str(scope_data)
        else:
            continue

        all_vars_docs[scope_name] = {'preview': preview, 'type': type(scope_data).__name__}

        # Recurse into the scope's data
        self._document_structure(scope_data, scope_name, all_vars_docs)

    return all_vars_docs
get_llm_variable_context()

Generates a detailed variable context formatted for LLM consumption, explaining structure, access patterns, and listing all available variables.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
7508
7509
7510
7511
7512
7513
7514
7515
7516
7517
7518
7519
7520
7521
7522
7523
7524
7525
7526
7527
7528
7529
7530
7531
7532
7533
7534
7535
7536
7537
7538
7539
7540
7541
7542
7543
7544
7545
7546
7547
7548
7549
7550
7551
7552
7553
7554
7555
def get_llm_variable_context(self) -> str:
    """
    Generates a detailed variable context formatted for LLM consumption,
    explaining structure, access patterns, and listing all available variables.
    """
    # Static usage instructions that always precede the variable listing.
    lines = [
        "## Variable System Reference",
        "You can access a state management system to retrieve data using dot notation.",
        "Syntax: `{{ path.to.variable }}` or `$path.to.variable`.",
        "",
        "### How to Access Data",
        "The system contains nested objects (dictionaries) and lists (arrays).",
        "",
        "**1. Object (Dictionary) Access (Primary Usage):**",
        "Use a dot (`.`) to access values inside an object. This is the most common way to get data.",
        "Example: If a `user` object exists with a `profile`, you can get the name with `{{ user.profile.name }}`.",
        "",
        "**2. List (Array) Access:**",
        "If a variable is a list, use a dot (`.`) followed by a zero-based number (index) to access a specific item.",
        "Example: To get the first email from a user's email list, use `{{ user.emails.0 }}`.",
        "You can chain these access methods: `{{ user.emails.0.address }}`.",
        "",
        "### Available Variables",
        "Below is a list of all currently available variable paths, their type, and a preview of their content. (Note: Previews may be truncated).",
    ]

    variables = self.get_available_variables()
    if not variables:
        lines.append("- No variables are currently set.")
        return "\n".join(lines)

    # Collapse the shared scope to a fixed placeholder entry.
    if "shared" in variables:
        variables["shared"] = {'preview': "Shared state variables", 'type': "dict"}

    # Render the variable overview as a fenced YAML block.
    lines += [
        "```yaml",
        yaml.dump(variables, default_flow_style=False, sort_keys=False),
        "```",
        "",
        "**Note on Task Results:**",
        "All task results are stored in the `results` scope. To access the data from a task, append `.data`.",
        "Example: `{{ results.'task-id-123'.data }}`",
    ]

    return "\n".join(lines)
get_scope_info()

Get information about all available scopes

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
7356
7357
7358
7359
7360
7361
7362
7363
7364
7365
7366
7367
7368
7369
7370
7371
def get_scope_info(self) -> dict[str, Any]:
    """Summarize every registered scope.

    Dict scopes report their size and up to five sample keys; any other
    scope type reports its type name and a truncated string preview.
    """
    summary: dict[str, Any] = {}
    for name, data in self.scopes.items():
        if isinstance(data, dict):
            entry = {
                'type': 'dict',
                'keys': len(data),
                'sample_keys': list(data.keys())[:5],
            }
        else:
            entry = {
                'type': type(data).__name__,
                'value': str(data)[:100],  # Cap preview length
            }
        summary[name] = entry
    return summary
get_variable_suggestions(query)

Get variable suggestions based on query content

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
7400
7401
7402
7403
7404
7405
7406
7407
7408
7409
7410
7411
7412
7413
7414
7415
7416
7417
7418
7419
7420
7421
7422
7423
def get_variable_suggestions(self, query: str) -> list[str]:
    """Suggest variable names relevant to *query*.

    A variable matches when any lowercase query word appears in its name
    or in the string form of its value.  Internal bookkeeping entries are
    excluded; at most ten deduplicated names are returned.
    """
    words = query.lower().split()
    # Internal entries that must never be suggested.
    excluded = {"system_context", "task_executor_instance",
                "index", "tool_capabilities", "use_fast_response",
                "task_planner_instance"}

    matched = []
    for scope in self.scopes.values():
        for name, definition in scope.items():
            if name in excluded:
                continue
            # Match on the name itself first, then on the value's text.
            if any(w in name.lower() for w in words):
                matched.append(name)
            elif definition and any(w in str(definition).lower() for w in words):
                matched.append(name)

    # Deduplicate (order not preserved) and cap at ten suggestions.
    return list(set(matched))[:10]
register_scope(name, data)

Register a new variable scope

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
7102
7103
7104
7105
def register_scope(self, name: str, data: dict):
    """Install *data* as the scope called *name*, replacing any existing one."""
    # Cached paths may resolve differently once the scope changes.
    self._cache.clear()
    self.scopes[name] = data
set(path, value, create_scope=True)

Set variable with dot notation path support for dicts and lists.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
7170
7171
7172
7173
7174
7175
7176
7177
7178
7179
7180
7181
7182
7183
7184
7185
7186
7187
7188
7189
7190
7191
7192
7193
7194
7195
7196
7197
7198
7199
7200
7201
7202
7203
7204
7205
7206
7207
7208
7209
7210
7211
7212
7213
7214
7215
7216
7217
7218
7219
7220
7221
7222
7223
7224
7225
7226
7227
7228
7229
7230
7231
7232
7233
7234
7235
7236
7237
7238
7239
7240
7241
7242
7243
7244
7245
7246
7247
7248
def set(self, path: str, value: Any, create_scope: bool = True) -> None:
    """Set variable with dot notation path support for dicts and lists.

    A single-segment path writes into ``self.world_model``. A multi-segment
    path treats the first segment as a scope name; intermediate containers
    are created on demand (a list when the following segment is numeric,
    otherwise a dict). The path cache is cleared after any write.

    Args:
        path: Dot-separated path, e.g. ``"scope.items.0.name"``.
        value: Value to store at the path.
        create_scope: When True, a missing scope is created; otherwise
            KeyError is raised.

    Raises:
        KeyError: Unknown scope and ``create_scope`` is False.
        TypeError: Path segment type does not match the container it
            addresses (int index on non-list, string key on non-dict),
            or the final container cannot accept the assignment.
    """
    # Invalidate cache for this path
    if path in self._cache:
        del self._cache[path]

    parts = path.split('.')

    if len(parts) == 1:
        # Simple key in world_model
        self.world_model[path] = value
        return

    scope_name = parts[0]
    if scope_name not in self.scopes:
        if create_scope:
            self.scopes[scope_name] = {}
        else:
            raise KeyError(f"Scope '{scope_name}' not found")

    current = self.scopes[scope_name]

    # Iterate to the second-to-last part to get the container
    for i, part in enumerate(parts[1:-1]):
        next_part = parts[i + 2]  # Look ahead to the next part in the path

        # Determine if the current part is a dictionary key or a list index.
        # EAFP: int() failure (ValueError) means it is a dict key; the
        # TypeErrors raised inside the try block intentionally propagate.
        try:
            # Try to treat it as a list index
            key = int(part)
            if not isinstance(current, list):
                # If current is not a list, we can't use an integer index
                raise TypeError(f"Attempted to use integer index '{key}' on non-list for path '{path}'")

            # Ensure list is long enough
            while len(current) <= key:
                current.append(None)  # Pad with None

            # If the next level doesn't exist, create it based on the next part
            if current[key] is None:
                current[key] = [] if next_part.isdigit() else {}

            current = current[key]

        except ValueError:
            # It's a dictionary key
            key = part
            if not isinstance(current, dict):
                raise TypeError(f"Attempted to use string key '{key}' on non-dict for path '{path}'")

            if key not in current:
                # Create the next level: a list if the next part is a number, else a dict
                current[key] = [] if next_part.isdigit() else {}

            current = current[key]

    # Handle the final part (the actual assignment)
    last_part = parts[-1]

    if isinstance(current, list):
        try:
            key = int(last_part)
            while len(current) <= key:
                current.append(None)
            current[key] = value
        except ValueError:
            # Non-numeric final segment on a list: append instead of index.
            current.append(value)
    elif isinstance(current, dict):
        current[last_part] = value
    elif scope_name == 'tasks' and hasattr(current, 'task_identification_attr'):# task dataclasses (e.g. ToolTask): dump to dict and assign
        # NOTE(review): this branch looks suspect — it stores the whole
        # merged dict under `last_part` via item assignment on the stored
        # task object, but dataclass instances are not subscriptable, so
        # the next line likely raises TypeError. The intent was presumably
        # `self.scopes['tasks'][parts[1]] = dict_data` — confirm with the
        # task-store callers before relying on this path.
        dict_data = asdict(current)
        dict_data[last_part] = value
        current = dict_data
        # update self.scopes['tasks'] with the updated task
        self.scopes['tasks'][parts[1]][last_part] = current
    else:
        raise TypeError(f"Final container is not a list or dictionary for path '{path}' its a {type(current)}")

    self._cache.clear()
set_results_store(results_store)

Set the results store for task result references

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
7107
7108
7109
7110
def set_results_store(self, results_store: dict):
    """Expose *results_store* under the reserved 'results' scope.

    Any cached path lookups are dropped because task-result references
    may now resolve differently.
    """
    self._cache.clear()
    self.scopes['results'] = results_store
set_tasks_store(tasks_store)

Set tasks store for task metadata access

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
7112
7113
7114
7115
def set_tasks_store(self, tasks_store: dict):
    """Expose *tasks_store* under the reserved 'tasks' scope.

    Cached lookups are invalidated so task-metadata paths re-resolve
    against the new store.
    """
    self._cache.clear()
    self.scopes['tasks'] = tasks_store
validate_references(text)

Validate all variable references in text

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
7332
7333
7334
7335
7336
7337
7338
7339
7340
7341
7342
7343
7344
7345
7346
7347
7348
7349
7350
7351
7352
7353
7354
def validate_references(self, text: str) -> dict[str, bool]:
    """Check every variable reference found in *text* against the store.

    Three reference syntaxes are recognized:
      * ``{{ path }}`` — double-brace paths (dots allowed)
      * ``{name}``     — single-brace simple names (no dots)
      * ``$name``      — dollar-prefixed identifiers

    Returns:
        Mapping from each reference as written (braces/sigil included)
        to True when it resolves to a non-None value via ``self.get``.
    """
    import re

    resolved: dict[str, bool] = {}

    # Double-brace references: paths with optional surrounding whitespace.
    for captured in re.findall(r'\{\{\s*([^}]+)\s*\}\}', text):
        resolved["{{" + captured + "}}"] = self.get(captured.strip()) is not None

    # Single-brace references: only dot-free simple variables count.
    for captured in re.findall(r'\{([^{}\s]+)\}', text):
        if '.' not in captured:
            resolved["{" + captured + "}"] = self.get(captured.strip()) is not None

    # Dollar references: bare identifiers.
    for captured in re.findall(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', text):
        resolved[f"${captured}"] = self.get(captured) is not None

    return resolved
auto_unescape(args)

Automatically unescape all strings in nested data structure.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
11041
11042
11043
def auto_unescape(args: Any) -> Any:
    """Recursively unescape every string found in *args*.

    Thin convenience wrapper around :func:`process_nested` with its
    default depth limit.
    """
    return process_nested(args)
create_task(task_type, **kwargs)

Factory für Task-Erstellung mit korrektem Typ

Source code in toolboxv2/mods/isaa/base/Agent/types.py
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
def create_task(task_type: str, **kwargs) -> Task:
    """Factory building the correct Task subclass for *task_type*.

    Unknown type strings fall back to the generic ``Task``. Missing
    standard fields (``id``, ``type``, ``critical``, ``metadata``) are
    populated with sensible defaults before construction.
    """
    # Accepts both the short snake_case names and the class names.
    registry = {
        "llm_call": LLMTask,
        "tool_call": ToolTask,
        "decision": DecisionTask,
        "generic": Task,
        "LLMTask": LLMTask,
        "ToolTask": ToolTask,
        "DecisionTask": DecisionTask,
        "Task": Task,
    }

    # Fill defaults without overwriting caller-provided values.
    if "id" not in kwargs:
        kwargs["id"] = str(uuid.uuid4())
    if "type" not in kwargs:
        kwargs["type"] = task_type
    if "critical" not in kwargs:
        # LLM calls and decisions are critical by default.
        kwargs["critical"] = task_type in ("llm_call", "decision")
    if "metadata" not in kwargs:
        kwargs["metadata"] = {}

    task = registry.get(task_type, Task)(**kwargs)

    # Defensive: guarantee metadata survives any __post_init__ overrides.
    if getattr(task, "metadata", None) is None:
        task.metadata = {}

    return task
get_args_schema(func)

Generate a string representation of a function's arguments and annotations. Keeps `*args` and `**kwargs` indicators and handles modern Python type hints.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
10820
10821
10822
10823
10824
10825
10826
10827
10828
10829
10830
10831
10832
10833
10834
10835
10836
10837
10838
10839
10840
10841
10842
10843
10844
10845
def get_args_schema(func: Callable) -> str:
    """
    Render a function signature as ``(name: type = default, *args, **kwargs)``.

    Annotations are stringified via ``_annotation_to_str``; the ``*`` and
    ``**`` markers of variadic parameters are preserved.
    """
    signature = inspect.signature(func)
    rendered = []

    for pname, param in signature.parameters.items():
        if param.kind == inspect.Parameter.VAR_POSITIONAL:
            marker = "*"
        elif param.kind == inspect.Parameter.VAR_KEYWORD:
            marker = "**"
        else:
            marker = ""

        annotation = ""
        if param.annotation is not inspect.Parameter.empty:
            annotation = f": {_annotation_to_str(param.annotation)}"

        default = ""
        if param.default is not inspect.Parameter.empty:
            default = f" = {param.default!r}"

        rendered.append(f"{marker}{pname}{annotation}{default}")

    return "(" + ", ".join(rendered) + ")"
get_progress_summary(self)

Get comprehensive progress summary from the agent

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
10808
10809
10810
10811
10812
def get_progress_summary(self) -> dict[str, Any]:
    """Return the agent's comprehensive progress summary.

    Falls back to an error payload when the agent carries no
    ``progress_tracker`` attribute at all.
    """
    if not hasattr(self, 'progress_tracker'):
        return {"error": "No progress tracker available"}
    return self.progress_tracker.get_summary()
needs_unescaping(text)

Detect if string likely needs unescaping.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
11018
11019
11020
def needs_unescaping(text: str) -> bool:
    """Heuristically decide whether *text* likely contains escape sequences.

    True when a recognizable backslash escape (\\n, \\t, \\r, \\", \\', \\\\)
    is present — or when the string exceeds 50 characters, in which case
    it is speculatively treated as escape-worthy.
    """
    if re.search(r'\\[ntr"\'\\]', text):
        return True
    return len(text) > 50
process_nested(data, max_depth=20)

Recursively process nested structures, unescaping strings that need it.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
11023
11024
11025
11026
11027
11028
11029
11030
11031
11032
11033
11034
11035
11036
11037
11038
def process_nested(data: Any, max_depth: int = 20) -> Any:
    """Walk *data* recursively, unescaping any string that looks escaped.

    Dicts and lists/tuples are rebuilt with processed children (tuples
    keep their concrete type). Recursion stops once *max_depth* is
    exhausted and the remaining subtree is returned untouched.
    """
    if max_depth <= 0:
        return data

    remaining = max_depth - 1

    if isinstance(data, dict):
        return {key: process_nested(val, remaining) for key, val in data.items()}

    if isinstance(data, (list, tuple)):
        children = [process_nested(item, remaining) for item in data]
        return type(data)(children)

    if isinstance(data, str) and needs_unescaping(data):
        return unescape_string(data)

    return data
unescape_string(text)

Universal string unescaping for any programming language.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
10997
10998
10999
11000
11001
11002
11003
11004
11005
11006
11007
11008
11009
11010
11011
11012
11013
11014
11015
def unescape_string(text: str) -> str:
    """Universal string unescaping for any programming language.

    Strips one matching pair of outer quotes (single or double), then
    translates the common escape sequences \\n, \\t, \\r, \\", \\' and
    \\\\ into their literal characters in a single left-to-right pass.
    Unrecognized escapes (e.g. ``\\x``) are left untouched.

    Args:
        text: Possibly quoted/escaped string. Non-strings and strings
            shorter than 2 characters are returned unchanged.

    Returns:
        The unescaped string.
    """
    if not isinstance(text, str) or len(text) < 2:
        return text

    # Remove one pair of matching outer quotes, if present.
    if (text.startswith('"') and text.endswith('"')) or (text.startswith("'") and text.endswith("'")):
        text = text[1:-1]

    escapes = {
        'n': '\n', 't': '\t', 'r': '\r',
        '"': '"', "'": "'", '\\': '\\',
    }

    # Single-pass substitution. The previous chain of str.replace() calls
    # mishandled adjacent backslashes: the input backslash-backslash-n was
    # turned into backslash+newline instead of the correct literal "\n".
    def _expand(match):
        char = match.group(1)
        return escapes.get(char, match.group(0))

    return re.sub(r'\\(.)', _expand, text)
with_progress_tracking(cls)

Ein Klassendekorator, der die Methoden run_async, prep_async, exec_async, und exec_fallback_async automatisch mit umfassendem Progress-Tracking umwickelt.

Source code in toolboxv2/mods/isaa/base/Agent/agent.py
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
def with_progress_tracking(cls):
    """Class decorator adding comprehensive progress tracking.

    Wraps ``run_async``, ``prep_async``, ``exec_async``, ``post_async`` and
    ``exec_fallback_async`` (each only when present on *cls*) so every phase
    is timed and emits ProgressEvent records through the shared
    ``progress_tracker``. When no tracker is present in the shared state,
    the original methods run untouched.
    """

    # --- Wrapper for run_async ---
    original_run = getattr(cls, 'run_async', None)
    if original_run:
        @functools.wraps(original_run)
        async def wrapped_run_async(self, shared):
            progress_tracker = shared.get("progress_tracker")
            node_name = self.__class__.__name__

            if not progress_tracker:
                return await original_run(self, shared)

            timer_key = f"{node_name}_total"
            progress_tracker.start_timer(timer_key)
            await progress_tracker.emit_event(ProgressEvent(
                event_type="node_enter",
                timestamp=time.time(),
                node_name=node_name,
                session_id=shared.get("session_id"),
                task_id=shared.get("current_task_id"),
                # Only dereference the plan when it exists; the previous code
                # eagerly constructed a throwaway default TaskPlan per event.
                plan_id=shared["current_plan"].id if shared.get("current_plan") else None,
                status=NodeStatus.RUNNING,
                success=None
            ))

            try:
                # Invoke the original method.
                result = await original_run(self, shared)

                total_duration = progress_tracker.end_timer(timer_key)
                await progress_tracker.emit_event(ProgressEvent(
                    event_type="node_exit",
                    timestamp=time.time(),
                    node_name=node_name,
                    status=NodeStatus.COMPLETED,
                    success=True,
                    node_duration=total_duration,
                    routing_decision=result,
                    session_id=shared.get("session_id"),
                    task_id=shared.get("current_task_id"),
                    metadata={"success": True}
                ))

                return result
            except Exception as e:
                total_duration = progress_tracker.end_timer(timer_key)
                await progress_tracker.emit_event(ProgressEvent(
                    event_type="error",
                    timestamp=time.time(),
                    node_name=node_name,
                    status=NodeStatus.FAILED,
                    success=False,
                    node_duration=total_duration,
                    session_id=shared.get("session_id"),
                    metadata={"error": str(e), "error_type": type(e).__name__}
                ))
                raise

        cls.run_async = wrapped_run_async

    # --- Wrapper for prep_async ---
    original_prep = getattr(cls, 'prep_async', None)
    if original_prep:
        @functools.wraps(original_prep)
        async def wrapped_prep_async(self, shared):
            progress_tracker = shared.get("progress_tracker")
            node_name = self.__class__.__name__

            if not progress_tracker:
                return await original_prep(self, shared)
            # "_total_p" spans prep through post and is closed in post_async.
            progress_tracker.start_timer(f"{node_name}_total_p")
            timer_key = f"{node_name}_prep"
            progress_tracker.start_timer(timer_key)
            await progress_tracker.emit_event(ProgressEvent(
                event_type="node_phase",
                timestamp=time.time(),
                node_name=node_name,
                status=NodeStatus.STARTING,
                node_phase="prep",
                session_id=shared.get("session_id")
            ))

            try:
                result = await original_prep(self, shared)

                prep_duration = progress_tracker.end_timer(timer_key)
                await progress_tracker.emit_event(ProgressEvent(
                    event_type="node_phase",
                    timestamp=time.time(),
                    status=NodeStatus.RUNNING,
                    success=True,
                    node_name=node_name,
                    node_phase="prep_complete",
                    node_duration=prep_duration,
                    session_id=shared.get("session_id")
                ))
                return result
            except Exception as e:
                progress_tracker.end_timer(timer_key)
                await progress_tracker.emit_event(ProgressEvent(
                    event_type="error",
                    timestamp=time.time(),
                    node_name=node_name,
                    status=NodeStatus.FAILED,
                    success=False,
                    metadata={"error": str(e), "error_type": type(e).__name__},
                    node_phase="prep_failed"
                ))
                raise

        cls.prep_async = wrapped_prep_async

    # --- Wrapper for exec_async ---
    original_exec = getattr(cls, 'exec_async', None)
    if original_exec:
        @functools.wraps(original_exec)
        async def wrapped_exec_async(self, prep_res):
            progress_tracker = prep_res.get("progress_tracker") if isinstance(prep_res, dict) else None
            node_name = self.__class__.__name__

            if not progress_tracker:
                return await original_exec(self, prep_res)

            timer_key = f"{node_name}_exec"
            progress_tracker.start_timer(timer_key)
            await progress_tracker.emit_event(ProgressEvent(
                event_type="node_phase",
                timestamp=time.time(),
                node_name=node_name,
                status=NodeStatus.RUNNING,
                node_phase="exec",
                session_id=prep_res.get("session_id") if isinstance(prep_res, dict) else None
            ))

            # No try/except here by design: error handling for the exec
            # phase is owned by run_async / exec_fallback_async.
            result = await original_exec(self, prep_res)

            exec_duration = progress_tracker.end_timer(timer_key)
            await progress_tracker.emit_event(ProgressEvent(
                event_type="node_phase",
                timestamp=time.time(),
                node_name=node_name,
                status=NodeStatus.RUNNING,
                success=True,
                node_phase="exec_complete",
                node_duration=exec_duration,
                session_id=prep_res.get("session_id") if isinstance(prep_res, dict) else None
            ))
            return result

        cls.exec_async = wrapped_exec_async

    # --- Wrapper for post_async ---
    original_post = getattr(cls, 'post_async', None)
    if original_post:
        @functools.wraps(original_post)
        async def wrapped_post_async(self, shared, prep_res, exec_res):
            # (removed a leftover debug print of string exec_res values)
            progress_tracker = shared.get("progress_tracker")
            node_name = self.__class__.__name__

            if not progress_tracker:
                return await original_post(self, shared, prep_res, exec_res)

            timer_key_post = f"{node_name}_post"
            progress_tracker.start_timer(timer_key_post)
            await progress_tracker.emit_event(ProgressEvent(
                event_type="node_phase",
                timestamp=time.time(),
                node_name=node_name,
                status=NodeStatus.COMPLETING,  # dedicated "completing" phase
                node_phase="post",
                session_id=shared.get("session_id")
            ))

            try:
                # Call the actual post_async method.
                result = await original_post(self, shared, prep_res, exec_res)

                post_duration = progress_tracker.end_timer(timer_key_post)
                total_duration = progress_tracker.end_timer(f"{node_name}_total_p")  # stop overall timer

                # Emit the decisive "node_exit" event after a successful post phase.
                await progress_tracker.emit_event(ProgressEvent(
                    event_type="node_exit",
                    timestamp=time.time(),
                    node_name=node_name,
                    status=NodeStatus.COMPLETED,
                    success=True,
                    node_duration=total_duration,
                    routing_decision=result,
                    session_id=shared.get("session_id"),
                    task_id=shared.get("current_task_id"),
                    metadata={
                        "success": True,
                        "post_duration": post_duration
                    }
                ))

                return result
            except Exception as e:
                # Failure during the post phase.
                post_duration = progress_tracker.end_timer(timer_key_post)
                # Fixed: previously ended "{node}_total" here (run_async's
                # timer) while the success path ended "{node}_total_p".
                total_duration = progress_tracker.end_timer(f"{node_name}_total_p")
                await progress_tracker.emit_event(ProgressEvent(
                    event_type="error",
                    timestamp=time.time(),
                    node_name=node_name,
                    status=NodeStatus.FAILED,
                    success=False,
                    node_duration=total_duration,
                    metadata={"error": str(e), "error_type": type(e).__name__, "phase": "post"},
                    node_phase="post_failed"
                ))
                raise

        cls.post_async = wrapped_post_async

    # --- Wrapper for exec_fallback_async ---
    original_fallback = getattr(cls, 'exec_fallback_async', None)
    if original_fallback:
        @functools.wraps(original_fallback)
        async def wrapped_fallback_async(self, prep_res, exc):
            progress_tracker = prep_res.get("progress_tracker") if isinstance(prep_res, dict) else None
            node_name = self.__class__.__name__

            if progress_tracker:
                timer_key = f"{node_name}_exec"
                exec_duration = progress_tracker.end_timer(timer_key)
                await progress_tracker.emit_event(ProgressEvent(
                    event_type="node_phase",
                    timestamp=time.time(),
                    node_name=node_name,
                    node_phase="exec_fallback",
                    node_duration=exec_duration,
                    status=NodeStatus.FAILED,
                    success=False,
                    session_id=prep_res.get("session_id") if isinstance(prep_res, dict) else None,
                    metadata={"error": str(exc), "error_type": type(exc).__name__},
                ))

            return await original_fallback(self, prep_res, exc)

        cls.exec_fallback_async = wrapped_fallback_async

    return cls
builder
A2AConfig

Bases: BaseModel

A2A server configuration

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
100
101
102
103
104
105
106
107
108
109
110
class A2AConfig(BaseModel):
    """A2A (agent-to-agent) server configuration."""
    model_config = ConfigDict(arbitrary_types_allowed=True)

    enabled: bool = False
    # Bind address and port for the A2A server.
    host: str = "0.0.0.0"
    port: int = 5000
    # Optional identity overrides; annotations fixed to `str | None` —
    # they previously claimed `str` while defaulting to None.
    agent_name: str | None = None
    agent_description: str | None = None
    agent_version: str = "1.0.0"
    # Expose registered tools as A2A skills.
    expose_tools_as_skills: bool = True
AgentConfig

Bases: BaseModel

Complete agent configuration for loading/saving

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
class AgentConfig(BaseModel):
    """Complete agent configuration for loading/saving."""
    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Basic settings
    name: str = "ProductionAgent"
    description: str = "Production-ready PocketFlow agent"
    version: str = "2.0.0"

    # LLM settings
    fast_llm_model: str = "openrouter/anthropic/claude-3-haiku"
    complex_llm_model: str = "openrouter/openai/gpt-4o"
    system_message: str = """You are a production-ready autonomous agent with advanced capabilities including:
- Native MCP tool integration for extensible functionality
- A2A compatibility for agent-to-agent communication
- Dynamic task planning and execution with adaptive reflection
- Advanced context management with session awareness
- Variable system for dynamic content generation
- Checkpoint/resume capabilities for reliability

Always utilize available tools when they can help solve the user's request efficiently."""

    temperature: float = 0.7
    max_tokens_output: int = 2048
    max_tokens_input: int = 32768
    api_key_env_var: str | None = "OPENROUTER_API_KEY"
    use_fast_response: bool = True

    # Features
    mcp: MCPConfig = Field(default_factory=MCPConfig)
    a2a: A2AConfig = Field(default_factory=A2AConfig)
    telemetry: TelemetryConfig = Field(default_factory=TelemetryConfig)
    checkpoint: CheckpointConfig = Field(default_factory=CheckpointConfig)

    # Agent behavior
    max_parallel_tasks: int = 3
    verbose_logging: bool = False

    # Persona and formatting — annotations fixed to `| None`; both fields
    # previously claimed non-optional types while defaulting to None.
    active_persona: str | None = None
    persona_profiles: dict[str, dict[str, Any]] = Field(default_factory=dict)
    default_format_config: dict[str, Any] | None = None

    # Custom variables and world model
    custom_variables: dict[str, Any] = Field(default_factory=dict)
    initial_world_model: dict[str, Any] = Field(default_factory=dict)
CheckpointConfig

Bases: BaseModel

Checkpoint configuration

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
123
124
125
126
127
128
129
class CheckpointConfig(BaseModel):
    """Checkpoint configuration.

    Controls periodic agent-state snapshots used for resume/recovery.
    """
    enabled: bool = True  # master switch for checkpointing
    interval_seconds: int = 300  # 5 minutes between automatic snapshots
    max_checkpoints: int = 10  # retention cap — presumably older snapshots are pruned elsewhere; TODO confirm
    checkpoint_dir: str = "./checkpoints"  # directory snapshots are written to
    auto_save_on_exit: bool = True  # write a final checkpoint on shutdown
FlowAgentBuilder

Production-ready FlowAgent builder focused on MCP, A2A, and robust deployment

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
 182
 183
 184
 185
 186
 187
 188
 189
 190
 191
 192
 193
 194
 195
 196
 197
 198
 199
 200
 201
 202
 203
 204
 205
 206
 207
 208
 209
 210
 211
 212
 213
 214
 215
 216
 217
 218
 219
 220
 221
 222
 223
 224
 225
 226
 227
 228
 229
 230
 231
 232
 233
 234
 235
 236
 237
 238
 239
 240
 241
 242
 243
 244
 245
 246
 247
 248
 249
 250
 251
 252
 253
 254
 255
 256
 257
 258
 259
 260
 261
 262
 263
 264
 265
 266
 267
 268
 269
 270
 271
 272
 273
 274
 275
 276
 277
 278
 279
 280
 281
 282
 283
 284
 285
 286
 287
 288
 289
 290
 291
 292
 293
 294
 295
 296
 297
 298
 299
 300
 301
 302
 303
 304
 305
 306
 307
 308
 309
 310
 311
 312
 313
 314
 315
 316
 317
 318
 319
 320
 321
 322
 323
 324
 325
 326
 327
 328
 329
 330
 331
 332
 333
 334
 335
 336
 337
 338
 339
 340
 341
 342
 343
 344
 345
 346
 347
 348
 349
 350
 351
 352
 353
 354
 355
 356
 357
 358
 359
 360
 361
 362
 363
 364
 365
 366
 367
 368
 369
 370
 371
 372
 373
 374
 375
 376
 377
 378
 379
 380
 381
 382
 383
 384
 385
 386
 387
 388
 389
 390
 391
 392
 393
 394
 395
 396
 397
 398
 399
 400
 401
 402
 403
 404
 405
 406
 407
 408
 409
 410
 411
 412
 413
 414
 415
 416
 417
 418
 419
 420
 421
 422
 423
 424
 425
 426
 427
 428
 429
 430
 431
 432
 433
 434
 435
 436
 437
 438
 439
 440
 441
 442
 443
 444
 445
 446
 447
 448
 449
 450
 451
 452
 453
 454
 455
 456
 457
 458
 459
 460
 461
 462
 463
 464
 465
 466
 467
 468
 469
 470
 471
 472
 473
 474
 475
 476
 477
 478
 479
 480
 481
 482
 483
 484
 485
 486
 487
 488
 489
 490
 491
 492
 493
 494
 495
 496
 497
 498
 499
 500
 501
 502
 503
 504
 505
 506
 507
 508
 509
 510
 511
 512
 513
 514
 515
 516
 517
 518
 519
 520
 521
 522
 523
 524
 525
 526
 527
 528
 529
 530
 531
 532
 533
 534
 535
 536
 537
 538
 539
 540
 541
 542
 543
 544
 545
 546
 547
 548
 549
 550
 551
 552
 553
 554
 555
 556
 557
 558
 559
 560
 561
 562
 563
 564
 565
 566
 567
 568
 569
 570
 571
 572
 573
 574
 575
 576
 577
 578
 579
 580
 581
 582
 583
 584
 585
 586
 587
 588
 589
 590
 591
 592
 593
 594
 595
 596
 597
 598
 599
 600
 601
 602
 603
 604
 605
 606
 607
 608
 609
 610
 611
 612
 613
 614
 615
 616
 617
 618
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
class FlowAgentBuilder:
    """Production-ready FlowAgent builder focused on MCP, A2A, and robust deployment"""

    def __init__(self, config: AgentConfig = None, config_path: str = None):
        """Initialize the builder from a config object, a config file, or defaults.

        Exactly one of *config* / *config_path* may be supplied; with neither,
        a fresh default ``AgentConfig`` is used.
        """
        if config and config_path:
            raise ValueError("Provide either config object or config_path, not both")

        if config_path:
            self.config = self.load_config(config_path)
        else:
            # Fall back to a default config when none was given.
            self.config = config or AgentConfig()

        # Runtime components populated later by the fluent API / build step.
        self._custom_tools: dict[str, tuple[Callable, str]] = {}
        self._mcp_tools: dict[str, dict] = {}
        from toolboxv2.mods.isaa.extras.mcp_session_manager import MCPSessionManager
        self._mcp_session_manager = MCPSessionManager()

        self._budget_manager: BudgetManager = None
        self._tracer_provider: TracerProvider = None
        self._a2a_server: Any = None

        # Honor the verbosity flag immediately so builder-time logs show up.
        if self.config.verbose_logging:
            logging.getLogger().setLevel(logging.DEBUG)

        iprint(f"FlowAgent Builder initialized: {self.config.name}")

    # ===== CONFIGURATION MANAGEMENT =====

    def load_config(self, config_path: str) -> AgentConfig:
        """Read an agent configuration file (YAML or JSON) into an AgentConfig."""
        path = Path(config_path)
        if not path.exists():
            raise FileNotFoundError(f"Config file not found: {config_path}")

        try:
            is_yaml = path.suffix.lower() in ('.yaml', '.yml')
            with open(path, encoding='utf-8') as fh:
                raw = yaml.safe_load(fh) if is_yaml else json.load(fh)
            return AgentConfig(**raw)
        except Exception as e:
            # Log, then propagate so callers can decide how to recover.
            eprint(f"Failed to load config from {config_path}: {e}")
            raise

    def save_config(self, config_path: str, format: str = 'yaml'):
        """Write the current configuration to *config_path* as YAML or JSON."""
        path = Path(config_path)
        path.parent.mkdir(parents=True, exist_ok=True)

        try:
            payload = self.config.model_dump()
            with open(path, 'w', encoding='utf-8') as fh:
                if format.lower() == 'yaml':
                    yaml.dump(payload, fh, default_flow_style=False, indent=2)
                else:
                    json.dump(payload, fh, indent=2)
            iprint(f"Configuration saved to {config_path}")
        except Exception as e:
            # Log, then propagate so callers can decide how to recover.
            eprint(f"Failed to save config to {config_path}: {e}")
            raise

    @classmethod
    def from_config_file(cls, config_path: str) -> 'FlowAgentBuilder':
        """Alternate constructor: build a new instance from a config file path."""
        builder = cls(config_path=config_path)
        return builder

    # ===== FLUENT BUILDER API =====

    def with_name(self, name: str) -> 'FlowAgentBuilder':
        """Set the agent name; returns the builder for chaining."""
        self.config.name = name
        return self

    def with_models(self, fast_model: str, complex_model: str = None) -> 'FlowAgentBuilder':
        """Set the fast LLM model and, optionally, the complex one; chainable."""
        cfg = self.config
        cfg.fast_llm_model = fast_model
        if complex_model:
            cfg.complex_llm_model = complex_model
        return self

    def with_system_message(self, message: str) -> 'FlowAgentBuilder':
        """Set the agent's system message; returns the builder for chaining."""
        self.config.system_message = message
        return self

    def with_temperature(self, temp: float) -> 'FlowAgentBuilder':
        """Set the sampling temperature; returns the builder for chaining."""
        self.config.temperature = temp
        return self

    def with_budget_manager(self, max_cost: float = 10.0) -> 'FlowAgentBuilder':
        """Enable budget management"""
        # NOTE(review): `max_cost` is only echoed in the log line below; it is
        # never passed to BudgetManager("agent"), so no spending limit is
        # actually enforced here — TODO confirm the intended BudgetManager API
        # call (e.g. creating a budget with the given cap) and wire it through.
        if LITELLM_AVAILABLE:
            self._budget_manager = BudgetManager("agent")
            iprint(f"Budget manager enabled: ${max_cost}")
        else:
            # Best-effort: missing litellm degrades to a warning, not an error.
            wprint("LiteLLM not available, budget manager disabled")
        return self

    def verbose(self, enable: bool = True) -> 'FlowAgentBuilder':
        """Toggle verbose (DEBUG-level) logging; returns the builder for chaining."""
        self.config.verbose_logging = enable
        if enable:
            # Raise the root logger level immediately, not just at build time.
            logging.getLogger().setLevel(logging.DEBUG)
        return self

    # ===== MCP INTEGRATION =====

    def enable_mcp_server(self, host: str = "0.0.0.0", port: int = 8000,
                          server_name: str = None) -> 'FlowAgentBuilder':
        """Turn on the MCP server and record its bind address in the config."""
        if not MCP_AVAILABLE:
            wprint("MCP not available, cannot enable server")
            return self

        mcp_cfg = self.config.mcp
        mcp_cfg.enabled = True
        mcp_cfg.host = host
        mcp_cfg.port = port
        # Default server name derives from the agent name.
        mcp_cfg.server_name = server_name or f"{self.config.name}_MCP"

        iprint(f"MCP server enabled: {host}:{port}")
        return self

    async def _load_mcp_server_capabilities(self, server_name: str, server_config: dict[str, Any]):
        """Load all capabilities from MCP server with persistent session.

        Registers wrapper callables for the server's tools, resources,
        resource templates and prompts in ``self._mcp_tools`` under
        ``<server>_<kind>_<name>`` keys.
        """
        try:
            # Get or create persistent session
            session = await self._mcp_session_manager.get_session(server_name, server_config)
            if not session:
                eprint(f"Failed to create session for MCP server: {server_name}")
                return

            # Extract all capabilities
            capabilities = await self._mcp_session_manager.extract_capabilities(session, server_name)

            # Create tool wrappers
            for tool_name, tool_info in capabilities['tools'].items():
                wrapper_name = f"{server_name}_{tool_name}"
                tool_wrapper = self._create_tool_wrapper(server_name, tool_name, tool_info, session)
                self._mcp_tools[wrapper_name] = {
                    'function': tool_wrapper,
                    'description': tool_info['description'],
                    'type': 'tool',
                    'server': server_name,
                    'original_name': tool_name,
                    'input_schema': tool_info.get('input_schema'),
                    'output_schema': tool_info.get('output_schema')
                }

            # Create resource wrappers.  Sanitize ':' as well as '/' so the
            # registry key matches both the wrapper's __name__ and the naming
            # scheme used by _create_capability_wrappers (previously only '/'
            # was replaced here, producing inconsistent keys).
            for resource_uri, resource_info in capabilities['resources'].items():
                safe_name = resource_info['name'].replace('/', '_').replace(':', '_')
                wrapper_name = f"{server_name}_resource_{safe_name}"
                resource_wrapper = self._create_resource_wrapper(server_name, resource_uri, resource_info, session)

                self._mcp_tools[wrapper_name] = {
                    'function': resource_wrapper,
                    'description': f"Read resource: {resource_info['description']}",
                    'type': 'resource',
                    'server': server_name,
                    'original_uri': resource_uri
                }

            # Create resource template wrappers (same sanitization as above)
            for template_uri, template_info in capabilities['resource_templates'].items():
                safe_name = template_info['name'].replace('/', '_').replace(':', '_')
                wrapper_name = f"{server_name}_template_{safe_name}"
                template_wrapper = self._create_resource_template_wrapper(server_name, template_uri, template_info,
                                                                          session)

                self._mcp_tools[wrapper_name] = {
                    'function': template_wrapper,
                    'description': f"Access resource template: {template_info['description']}",
                    'type': 'resource_template',
                    'server': server_name,
                    'original_template': template_uri
                }

            # Create prompt wrappers
            for prompt_name, prompt_info in capabilities['prompts'].items():
                wrapper_name = f"{server_name}_prompt_{prompt_name}"
                prompt_wrapper = self._create_prompt_wrapper(server_name, prompt_name, prompt_info, session)

                self._mcp_tools[wrapper_name] = {
                    'function': prompt_wrapper,
                    'description': f"Execute prompt: {prompt_info['description']}",
                    'type': 'prompt',
                    'server': server_name,
                    'original_name': prompt_name,
                    'arguments': prompt_info.get('arguments', [])
                }

            total_capabilities = (len(capabilities['tools']) +
                                  len(capabilities['resources']) +
                                  len(capabilities['resource_templates']) +
                                  len(capabilities['prompts']))

            iprint(f"Created {total_capabilities} capability wrappers for server: {server_name}")

        except Exception as e:
            # Best-effort: a failing server must not break builder setup.
            eprint(f"Failed to load capabilities from MCP server {server_name}: {e}")

    def _create_tool_wrapper(self, server_name: str, tool_name: str, tool_info: dict, session: ClientSession):
        """Create wrapper function for MCP tool with dynamic signature based on schema"""
        import inspect

        input_schema = tool_info.get('input_schema', {})
        output_schema = tool_info.get('output_schema', {})
        required_params = set(input_schema.get('required', []))
        properties = input_schema.get('properties', {})

        # One JSON-schema -> Python type table, shared by params and return.
        json_to_py = {
            'string': str,
            'integer': int,
            'number': float,
            'boolean': bool,
            'array': list,
            'object': dict,
        }

        # One inspect.Parameter per schema property; optional parameters
        # default to None so callers may omit them.
        parameters = []
        for param_name, param_info in properties.items():
            annotation = json_to_py.get(param_info.get('type', 'string'), str)
            if param_name in required_params:
                parameters.append(inspect.Parameter(
                    param_name, inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=annotation))
            else:
                parameters.append(inspect.Parameter(
                    param_name, inspect.Parameter.POSITIONAL_OR_KEYWORD,
                    annotation=annotation, default=None))

        # Derive the return annotation from the output schema: a single
        # declared property unwraps to its own type, several collapse to dict.
        return_type = str
        if output_schema and 'properties' in output_schema:
            output_props = output_schema['properties']
            if len(output_props) == 1:
                sole = list(output_props.values())[0]
                return_type = json_to_py.get(sole.get('type', 'string'), str)
            else:
                return_type = dict

        async def tool_wrapper(*args, **kwargs):
            try:
                # Positional args fill schema properties in declaration order.
                param_names = list(properties.keys())
                arguments = dict(zip(param_names, args))

                # Keyword args: drop None for optional params so the server
                # sees only values the caller actually supplied.
                for key, value in kwargs.items():
                    if value is not None or key in required_params:
                        arguments[key] = value

                missing_required = required_params - set(arguments.keys())
                if missing_required:
                    raise ValueError(f"Missing required parameters: {missing_required}")

                # Call the actual MCP tool
                result = await session.call_tool(tool_name, arguments)

                # Prefer structured results when the server provides them.
                structured = getattr(result, 'structuredContent', None)
                if structured:
                    if output_schema and 'properties' in output_schema:
                        props = output_schema['properties']
                        if len(props) == 1:
                            sole_key = list(props.keys())[0]
                            if isinstance(structured, dict) and sole_key in structured:
                                return structured[sole_key]
                    return structured

                # Fallback: extract from the first content item.
                if result.content:
                    first = result.content[0]
                    if hasattr(first, 'text'):
                        return first.text
                    if hasattr(first, 'data'):
                        return first.data
                    return str(first)

                return "No content returned"

            except Exception as e:
                eprint(f"MCP tool {server_name}.{tool_name} failed: {e}")
                raise RuntimeError(f"Error executing {tool_name}: {str(e)}")

        # Advertise the schema-derived signature to introspection consumers.
        tool_wrapper.__signature__ = inspect.Signature(parameters, return_annotation=return_type)
        tool_wrapper.__name__ = f"{server_name}_{tool_name}"
        tool_wrapper.__doc__ = tool_info.get('description', f"MCP tool: {tool_name}")
        annotations = {'return': return_type}
        for param in parameters:
            annotations[param.name] = param.annotation
        tool_wrapper.__annotations__ = annotations

        return tool_wrapper

    def _create_resource_wrapper(self, server_name: str, resource_uri: str, resource_info: dict,
                                 session: ClientSession):
        """Create wrapper function for MCP resource with proper signature"""
        import inspect

        # Resources take no parameters; the wrapper returns text content.
        async def resource_wrapper() -> str:
            """Read MCP resource content"""
            try:
                from pydantic import AnyUrl
                result = await session.read_resource(AnyUrl(resource_uri))

                if not result.contents:
                    return ""

                content = result.contents[0]
                if hasattr(content, 'text'):
                    return content.text
                if hasattr(content, 'data'):
                    # Binary payloads are decoded best-effort to text.
                    data = content.data
                    if isinstance(data, bytes):
                        return data.decode('utf-8', errors='ignore')
                    return str(data)
                return str(content)

            except Exception as e:
                eprint(f"MCP resource {resource_uri} failed: {e}")
                raise RuntimeError(f"Error reading resource: {str(e)}")

        # Metadata for introspection / registry naming.
        safe_name = resource_info['name'].replace('/', '_').replace(':', '_')
        resource_wrapper.__signature__ = inspect.Signature([], return_annotation=str)
        resource_wrapper.__name__ = f"{server_name}_resource_{safe_name}"
        resource_wrapper.__doc__ = f"Read MCP resource: {resource_info.get('description', resource_uri)}"
        resource_wrapper.__annotations__ = {'return': str}

        return resource_wrapper

    def _create_resource_template_wrapper(self, server_name: str, template_uri: str, template_info: dict,
                                          session: ClientSession):
        """Create wrapper function for MCP resource template with dynamic parameters"""
        import inspect
        import re

        # Placeholders such as {owner}/{repo} become wrapper parameters.
        template_vars = re.findall(r'\{(\w+)\}', template_uri)

        parameters = [
            inspect.Parameter(var, inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=str)
            for var in template_vars
        ]

        async def template_wrapper(*args, **kwargs) -> str:
            """Access MCP resource template with parameters"""
            try:
                from pydantic import AnyUrl

                # Positional args fill template variables in URI order.
                template_args = dict(zip(template_vars, args))
                template_args.update(kwargs)

                missing_vars = set(template_vars) - set(template_args.keys())
                if missing_vars:
                    raise ValueError(f"Missing required template variables: {missing_vars}")

                # Substitute every placeholder to form the concrete URI.
                actual_uri = template_uri
                for var, value in template_args.items():
                    actual_uri = actual_uri.replace(f"{{{var}}}", str(value))

                result = await session.read_resource(AnyUrl(actual_uri))

                if not result.contents:
                    return ""

                content = result.contents[0]
                if hasattr(content, 'text'):
                    return content.text
                if hasattr(content, 'data'):
                    if isinstance(content.data, bytes):
                        return content.data.decode('utf-8', errors='ignore')
                    return str(content.data)
                return str(content)

            except Exception as e:
                eprint(f"MCP resource template {template_uri} failed: {e}")
                raise RuntimeError(f"Error accessing resource template: {str(e)}")

        # Metadata for introspection / registry naming.
        safe_name = template_info['name'].replace('/', '_').replace(':', '_')
        template_wrapper.__signature__ = inspect.Signature(parameters, return_annotation=str)
        template_wrapper.__name__ = f"{server_name}_template_{safe_name}"
        template_wrapper.__doc__ = f"Access MCP resource template: {template_info.get('description', template_uri)}\nTemplate variables: {', '.join(template_vars)}"
        annotations = {'return': str}
        for param in parameters:
            annotations[param.name] = str
        template_wrapper.__annotations__ = annotations

        return template_wrapper

    def _create_prompt_wrapper(self, server_name: str, prompt_name: str, prompt_info: dict, session: ClientSession):
        """Create wrapper function for MCP prompt with dynamic parameters"""
        import inspect

        prompt_args = prompt_info.get('arguments', [])

        # One string parameter per declared prompt argument; optional ones
        # default to None so callers may omit them.
        parameters = []
        for spec in prompt_args:
            if spec.get('required', False):
                parameters.append(inspect.Parameter(
                    spec['name'], inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=str))
            else:
                parameters.append(inspect.Parameter(
                    spec['name'], inspect.Parameter.POSITIONAL_OR_KEYWORD,
                    annotation=str, default=None))

        async def prompt_wrapper(*args, **kwargs) -> str:
            """Execute MCP prompt with parameters"""
            try:
                arg_names = [spec['name'] for spec in prompt_args]
                required_args = {spec['name'] for spec in prompt_args if spec.get('required', False)}

                # Positional args fill declared arguments in order; keyword
                # args drop None for optional params.
                prompt_arguments = dict(zip(arg_names, args))
                for key, value in kwargs.items():
                    if value is not None or key in required_args:
                        prompt_arguments[key] = value

                missing_required = required_args - set(prompt_arguments.keys())
                if missing_required:
                    raise ValueError(f"Missing required prompt arguments: {missing_required}")

                result = await session.get_prompt(prompt_name, prompt_arguments)

                # Flatten prompt messages into one newline-joined string.
                rendered = []
                for message in result.messages:
                    if hasattr(message.content, 'text'):
                        rendered.append(message.content.text)
                    else:
                        rendered.append(str(message.content))

                return "\n".join(rendered) if rendered else ""

            except Exception as e:
                eprint(f"MCP prompt {prompt_name} failed: {e}")
                raise RuntimeError(f"Error executing prompt: {str(e)}")

        prompt_wrapper.__signature__ = inspect.Signature(parameters, return_annotation=str)
        prompt_wrapper.__name__ = f"{server_name}_prompt_{prompt_name}"

        # Docstring lists each argument with its required/optional status.
        param_docs = [
            f"    {spec['name']} ({'required' if spec.get('required', False) else 'optional'}): {spec.get('description', 'No description')}"
            for spec in prompt_args
        ]
        docstring = f"Execute MCP prompt: {prompt_info.get('description', prompt_name)}"
        if param_docs:
            docstring += "\n\nParameters:\n" + "\n".join(param_docs)
        prompt_wrapper.__doc__ = docstring

        annotations = {'return': str}
        for param in parameters:
            annotations[param.name] = str
        prompt_wrapper.__annotations__ = annotations

        return prompt_wrapper

    def load_mcp_tools_from_config(self, config_path: str | dict) -> 'FlowAgentBuilder':
        """Enhanced MCP config loading with automatic session management and full capability extraction"""
        if not MCP_AVAILABLE:
            wprint("MCP not available, skipping tool loading")
            return self

        if isinstance(config_path, dict):
            # Inline dict config: persist it under the app data dir so a
            # concrete file path can be recorded in the agent config.
            mcp_config = config_path
            from toolboxv2 import get_app
            name = self.config.name or "inline_config"
            path = Path(get_app().appdata) / "isaa" / "MPCConfig" / f"{name}.json"
            path.parent.mkdir(parents=True, exist_ok=True)
            path.write_text(json.dumps(mcp_config, indent=2))
            config_path = path
        else:
            config_path = Path(config_path)
            if not config_path.exists():
                raise FileNotFoundError(f"MCP config not found: {config_path}")

            try:
                is_yaml = config_path.suffix.lower() in ['.yaml', '.yml']
                with open(config_path, encoding='utf-8') as fh:
                    mcp_config = yaml.safe_load(fh) if is_yaml else json.load(fh)
            except Exception as e:
                eprint(f"Failed to load MCP config from {config_path}: {e}")
                raise

        # Defer actual server loading to the (async) build step.
        self._mcp_config_data = mcp_config
        self.config.mcp.config_path = str(config_path)
        self._mcp_needs_loading = True

        iprint(f"MCP config loaded from {config_path}, will process during build")
        return self

    async def _process_mcp_config(self):
        """Process MCP configuration with proper task management.

        Servers are loaded sequentially (not concurrently) to avoid asyncio
        task-boundary issues with the underlying MCP sessions.
        """
        if not hasattr(self, '_mcp_config_data') or not self._mcp_config_data:
            return

        mcp_config = self._mcp_config_data

        # Single source of truth for the per-server timeout; also used in the
        # timeout log message (the old message hard-coded "15 seconds" while
        # the actual wait_for timeout was 5.0 — they are now consistent).
        per_server_timeout = 5.0

        # Handle standard MCP server configuration with sequential processing
        if 'mcpServers' in mcp_config:
            servers_to_load = []

            # Validate all servers first
            for server_name, server_config in mcp_config['mcpServers'].items():
                if self._validate_mcp_server_config(server_name, server_config):
                    servers_to_load.append((server_name, server_config))
                else:
                    wprint(f"Skipping invalid MCP server config: {server_name}")

            if servers_to_load:
                iprint(f"Processing {len(servers_to_load)} MCP servers sequentially...")

                successful_loads = 0
                for server_name, server_config in servers_to_load:
                    try:
                        result = await asyncio.wait_for(
                            self._load_single_mcp_server(server_name, server_config),
                            timeout=per_server_timeout
                        )

                        if result:
                            successful_loads += 1
                            iprint(f"✓ Successfully loaded MCP server: {server_name}")
                        else:
                            wprint(f"⚠ MCP server {server_name} loaded with issues")

                    except TimeoutError:
                        eprint(f"✗ MCP server {server_name} timed out after {per_server_timeout} seconds")
                    except Exception as e:
                        eprint(f"✗ Failed to load MCP server {server_name}: {e}")

                iprint(
                    f"MCP processing complete: {successful_loads}/{len(servers_to_load)} servers loaded successfully")

        # Handle direct tools configuration (legacy)
        elif 'tools' in mcp_config:
            for tool_config in mcp_config['tools']:
                try:
                    self._load_direct_mcp_tool(tool_config)
                except Exception as e:
                    eprint(f"Failed to load direct MCP tool: {e}")

    async def _load_single_mcp_server(self, server_name: str, server_config: dict[str, Any]) -> bool:
        """Load one MCP server: open a session, pull capabilities, register wrappers.

        Returns True on success; False when the session could not be created,
        no capabilities were found, or any error occurred.
        """
        try:
            iprint(f"🔄 Processing MCP server: {server_name}")

            session = await self._mcp_session_manager.get_session_with_timeout(server_name, server_config)
            if not session:
                eprint(f"✗ Failed to create session for MCP server: {server_name}")
                return False

            capabilities = await self._mcp_session_manager.extract_capabilities_with_timeout(session, server_name)
            if not any(capabilities.values()):
                wprint(f"⚠ No capabilities found for MCP server: {server_name}")
                return False

            await self._create_capability_wrappers(server_name, capabilities, session)

            total_caps = sum(len(entries) for entries in capabilities.values())
            iprint(f"✓ Created {total_caps} capability wrappers for: {server_name}")
            return True

        except Exception as e:
            eprint(f"✗ Error loading MCP server {server_name}: {e}")
            return False

    async def _create_capability_wrappers(self, server_name: str, capabilities: dict, session: ClientSession):
        """Register wrapper callables for every capability, one try/except per item."""

        def _safe(name: str) -> str:
            # Registry keys must not contain path/URI separators.
            return name.replace('/', '_').replace(':', '_')

        # Tools
        for tool_name, tool_info in capabilities['tools'].items():
            try:
                self._mcp_tools[f"{server_name}_{tool_name}"] = {
                    'function': self._create_tool_wrapper(server_name, tool_name, tool_info, session),
                    'description': tool_info['description'],
                    'type': 'tool',
                    'server': server_name,
                    'original_name': tool_name,
                    'input_schema': tool_info.get('input_schema'),
                    'output_schema': tool_info.get('output_schema')
                }
            except Exception as e:
                eprint(f"Failed to create tool wrapper {tool_name}: {e}")

        # Resources
        for resource_uri, resource_info in capabilities['resources'].items():
            try:
                self._mcp_tools[f"{server_name}_resource_{_safe(resource_info['name'])}"] = {
                    'function': self._create_resource_wrapper(server_name, resource_uri, resource_info, session),
                    'description': f"Read resource: {resource_info['description']}",
                    'type': 'resource',
                    'server': server_name,
                    'original_uri': resource_uri
                }
            except Exception as e:
                eprint(f"Failed to create resource wrapper {resource_uri}: {e}")

        # Resource templates
        for template_uri, template_info in capabilities['resource_templates'].items():
            try:
                self._mcp_tools[f"{server_name}_template_{_safe(template_info['name'])}"] = {
                    'function': self._create_resource_template_wrapper(server_name, template_uri, template_info,
                                                                       session),
                    'description': f"Access resource template: {template_info['description']}",
                    'type': 'resource_template',
                    'server': server_name,
                    'original_template': template_uri
                }
            except Exception as e:
                eprint(f"Failed to create template wrapper {template_uri}: {e}")

        # Prompts
        for prompt_name, prompt_info in capabilities['prompts'].items():
            try:
                self._mcp_tools[f"{server_name}_prompt_{prompt_name}"] = {
                    'function': self._create_prompt_wrapper(server_name, prompt_name, prompt_info, session),
                    'description': f"Execute prompt: {prompt_info['description']}",
                    'type': 'prompt',
                    'server': server_name,
                    'original_name': prompt_name,
                    'arguments': prompt_info.get('arguments', [])
                }
            except Exception as e:
                eprint(f"Failed to create prompt wrapper {prompt_name}: {e}")

    @staticmethod
    def _validate_mcp_server_config(server_name: str, server_config: dict[str, Any]) -> bool:
        """Validate MCP server configuration.

        Accepts either an HTTP-style server (transport + url — no local
        command required) or a command-launched server.  Structural checks on
        'args' and 'env' now apply to every command-launched server, including
        well-known commands (previously known commands skipped them, and HTTP
        servers were rejected for lacking a 'command').

        Returns True when the config looks usable, False otherwise.
        """
        # HTTP transports are driven by a URL and need no local executable,
        # so check them before requiring 'command'.
        if server_config.get('transport') in ['http', 'streamable-http'] and server_config.get('url'):
            return True

        command = server_config.get('command')
        if not command:
            eprint(f"MCP server {server_name} missing 'command' field")
            return False

        # For uncommon commands, check PATH availability.  Warn only — the
        # command might still become available at runtime.
        if command not in ['npx', 'node', 'python', 'python3', 'docker']:
            import shutil
            if not shutil.which(command):
                wprint(f"MCP server {server_name}: command '{command}' not found in PATH")

        args = server_config.get('args', [])
        if not isinstance(args, list):
            eprint(f"MCP server {server_name}: 'args' must be a list")
            return False

        env = server_config.get('env', {})
        if not isinstance(env, dict):
            eprint(f"MCP server {server_name}: 'env' must be a dictionary")
            return False

        iprint(f"Validated MCP server config: {server_name}")
        return True

    def _load_direct_mcp_tool(self, tool_config: dict[str, Any]):
        """Register a tool whose implementation is given as an inline code string.

        Args:
            tool_config: Dict with required 'name' and 'function_code' keys and
                an optional 'description'.

        The code string is executed and the first public callable found in the
        resulting namespace is stored in self._mcp_tools under 'name'.
        """
        name = tool_config.get('name')
        description = tool_config.get('description', '')
        function_code = tool_config.get('function_code')

        if not name or not function_code:
            wprint(f"Incomplete tool config: {tool_config}")
            return

        # Create function from code
        try:
            # SECURITY: exec() runs arbitrary code. Only pass trusted tool
            # definitions here, never untrusted/user-supplied input.
            namespace = {"__builtins__": __builtins__}
            exec(function_code, namespace)

            # Pick the first public (non-underscore) callable the snippet defined.
            func = None
            for obj in namespace.values():
                if callable(obj) and not getattr(obj, '__name__', '').startswith('_'):
                    func = obj
                    break

            if func:
                self._mcp_tools[name] = {
                    'function': func,
                    'description': description,
                    'source': 'code'
                }
                iprint(f"Loaded MCP tool from code: {name}")
            else:
                # Fix: previously a snippet defining no public callable was
                # dropped silently, making misconfigurations hard to diagnose.
                wprint(f"No public callable found in code for MCP tool {name}")

        except Exception as e:
            eprint(f"Failed to load MCP tool {name}: {e}")

    def add_mcp_tool_from_code(self, name: str, code: str, description: str = "") -> 'FlowAgentBuilder':
        """Register an MCP tool whose implementation is supplied as a code string."""
        self._load_direct_mcp_tool({
            'name': name,
            'description': description,
            'function_code': code,
        })
        return self

    # ===== A2A INTEGRATION =====

    def enable_a2a_server(self, host: str = "0.0.0.0", port: int = 5000,
                          agent_name: str = None, agent_description: str = None) -> 'FlowAgentBuilder':
        """Turn on the agent-to-agent (A2A) communication server."""
        if not A2A_AVAILABLE:
            wprint("A2A not available, cannot enable server")
            return self

        a2a_cfg = self.config.a2a
        a2a_cfg.enabled = True
        a2a_cfg.host = host
        a2a_cfg.port = port
        # Fall back to the agent's own identity when not given explicitly.
        a2a_cfg.agent_name = agent_name if agent_name else self.config.name
        a2a_cfg.agent_description = agent_description if agent_description else self.config.description

        iprint(f"A2A server enabled: {host}:{port}")
        return self

    # ===== TELEMETRY INTEGRATION =====

    def enable_telemetry(self, service_name: str = None, endpoint: str = None,
                         console_export: bool = True) -> 'FlowAgentBuilder':
        """Enable OpenTelemetry tracing.

        Args:
            service_name: Reported service name; defaults to the agent name.
            endpoint: Optional OTLP collector endpoint for span export.
            console_export: When True, also export spans to the console.
        """
        if not OTEL_AVAILABLE:
            wprint("OpenTelemetry not available, cannot enable telemetry")
            return self

        self.config.telemetry.enabled = True
        self.config.telemetry.service_name = service_name or self.config.name
        self.config.telemetry.endpoint = endpoint
        self.config.telemetry.console_export = console_export

        # Initialize tracer provider and install it globally.
        self._tracer_provider = TracerProvider()
        trace.set_tracer_provider(self._tracer_provider)

        # Add exporters
        if console_export:
            console_exporter = ConsoleSpanExporter()
            span_processor = BatchSpanProcessor(console_exporter)
            self._tracer_provider.add_span_processor(span_processor)

        if endpoint:
            try:
                otlp_exporter = OTLPSpanExporter(endpoint=endpoint)
                otlp_processor = BatchSpanProcessor(otlp_exporter)
                self._tracer_provider.add_span_processor(otlp_processor)
            except Exception as e:
                wprint(f"Failed to setup OTLP exporter: {e}")

        # Fix: log the resolved service name instead of the raw parameter,
        # which is None whenever the caller relies on the default.
        iprint(f"Telemetry enabled for service: {self.config.telemetry.service_name}")
        return self

    # ===== CHECKPOINT CONFIGURATION =====

    def with_checkpointing(self, enabled: bool = True, interval_seconds: int = 300,
                           checkpoint_dir: str = "./checkpoints", max_checkpoints: int = 10) -> 'FlowAgentBuilder':
        """Configure periodic agent checkpointing."""
        cp = self.config.checkpoint
        cp.enabled = enabled
        cp.interval_seconds = interval_seconds
        cp.checkpoint_dir = checkpoint_dir
        cp.max_checkpoints = max_checkpoints

        if enabled:
            # Make sure the target directory exists up front.
            Path(checkpoint_dir).mkdir(parents=True, exist_ok=True)
            iprint(f"Checkpointing enabled: {checkpoint_dir} (every {interval_seconds}s)")

        return self

    # ===== TOOL MANAGEMENT =====

    def add_tool(self, func: Callable, name: str = None, description: str = None) -> 'FlowAgentBuilder':
        """Queue a custom tool function for registration at build time."""
        # Fall back to the function's own name / docstring when not given.
        resolved_name = name or func.__name__
        self._custom_tools[resolved_name] = (func, description or func.__doc__)
        iprint(f"Tool added: {resolved_name}")
        return self

    def add_tools_from_module(self, module, prefix: str = "", exclude: list[str] = None) -> 'FlowAgentBuilder':
        """Register every public function of *module* as a tool."""
        skip = set(exclude) if exclude else set()

        for func_name, func_obj in inspect.getmembers(module, inspect.isfunction):
            # Private helpers and explicitly excluded names are ignored.
            if func_name.startswith('_') or func_name in skip:
                continue
            registered_name = f"{prefix}{func_name}" if prefix else func_name
            self.add_tool(func_obj, name=registered_name)

        iprint(f"Added tools from module {module.__name__}")
        return self

    # ===== PERSONA MANAGEMENT =====

    def add_persona_profile(self, profile_name: str, name: str, style: str = "professional",
                            tone: str = "friendly", personality_traits: list[str] = None,
                            custom_instructions: str = "", response_format: str = None,
                            text_length: str = None) -> 'FlowAgentBuilder':
        """Store a persona profile, optionally with response-format settings."""
        traits = ["helpful", "concise"] if personality_traits is None else personality_traits

        profile = {
            "name": name,
            "style": style,
            "tone": tone,
            "personality_traits": traits,
            "custom_instructions": custom_instructions,
            "apply_method": "system_prompt",
            "integration_level": "light",
        }

        # Attach a format block only when at least one format option was given.
        if response_format or text_length:
            profile["format_config"] = {
                "response_format": response_format or "frei-text",
                "text_length": text_length or "chat-conversation",
                "custom_instructions": "",
                "strict_format_adherence": True,
                "quality_threshold": 0.7,
            }

        self.config.persona_profiles[profile_name] = profile
        iprint(f"Persona profile added: {profile_name}")
        return self

    def set_active_persona(self, profile_name: str) -> 'FlowAgentBuilder':
        """Select which stored persona profile the agent should use."""
        # Guard: unknown profiles are reported but do not change the config.
        if profile_name not in self.config.persona_profiles:
            wprint(f"Persona profile not found: {profile_name}")
            return self

        self.config.active_persona = profile_name
        iprint(f"Active persona set: {profile_name}")
        return self

    def with_developer_persona(self, name: str = "Senior Developer") -> 'FlowAgentBuilder':
        """Install and activate a pre-built developer persona."""
        self.add_persona_profile(
            "developer",
            name=name,
            style="technical",
            tone="professional",
            personality_traits=["precise", "thorough", "security_conscious", "best_practices"],
            custom_instructions="Focus on code quality, maintainability, and security. Always consider edge cases.",
            response_format="code-structure",
            text_length="detailed-indepth",
        )
        return self.set_active_persona("developer")

    def with_analyst_persona(self, name: str = "Data Analyst") -> 'FlowAgentBuilder':
        """Install and activate a pre-built data-analyst persona."""
        self.add_persona_profile(
            "analyst",
            name=name,
            style="analytical",
            tone="objective",
            personality_traits=["methodical", "insight_driven", "evidence_based"],
            custom_instructions="Focus on statistical rigor and actionable recommendations.",
            response_format="with-tables",
            text_length="detailed-indepth",
        )
        return self.set_active_persona("analyst")

    def with_assistant_persona(self, name: str = "AI Assistant") -> 'FlowAgentBuilder':
        """Install and activate a pre-built general-assistant persona."""
        self.add_persona_profile(
            "assistant",
            name=name,
            style="friendly",
            tone="helpful",
            personality_traits=["helpful", "patient", "clear", "adaptive"],
            custom_instructions="Be helpful and adapt communication to user expertise level.",
            response_format="with-bullet-points",
            text_length="chat-conversation",
        )
        return self.set_active_persona("assistant")

    def with_creative_persona(self, name: str = "Creative Assistant") -> 'FlowAgentBuilder':
        """Install and activate a pre-built creative persona."""
        self.add_persona_profile(
            "creative",
            name=name,
            style="creative",
            tone="inspiring",
            personality_traits=["imaginative", "expressive", "innovative", "engaging"],
            custom_instructions="Think outside the box and provide creative, inspiring solutions.",
            response_format="md-text",
            text_length="detailed-indepth",
        )
        return self.set_active_persona("creative")

    def with_executive_persona(self, name: str = "Executive Assistant") -> 'FlowAgentBuilder':
        """Install and activate a pre-built executive persona."""
        self.add_persona_profile(
            "executive",
            name=name,
            style="professional",
            tone="authoritative",
            personality_traits=["strategic", "decisive", "results_oriented", "efficient"],
            custom_instructions="Provide strategic insights with executive-level clarity and focus on outcomes.",
            response_format="with-bullet-points",
            text_length="table-conversation",
        )
        return self.set_active_persona("executive")

    # ===== VARIABLE MANAGEMENT =====

    def with_custom_variables(self, variables: dict[str, Any]) -> 'FlowAgentBuilder':
        """Merge *variables* into the agent's custom variable store."""
        for key, value in variables.items():
            self.config.custom_variables[key] = value
        return self

    def with_world_model(self, world_model: dict[str, Any]) -> 'FlowAgentBuilder':
        """Merge *world_model* entries into the agent's initial world model."""
        for key, value in world_model.items():
            self.config.initial_world_model[key] = value
        return self

    # ===== VALIDATION =====

    def validate_config(self) -> dict[str, list[str]]:
        """Check the builder configuration for problems.

        Returns:
            Dict with 'errors' (fatal) and 'warnings' (non-fatal) message lists.
        """
        errors: list[str] = []
        warnings: list[str] = []

        # Both model slots must be filled.
        if not self.config.fast_llm_model:
            errors.append("Fast LLM model not specified")
        if not self.config.complex_llm_model:
            errors.append("Complex LLM model not specified")

        # Optional integrations must actually be installed when enabled.
        if self.config.mcp.enabled and not MCP_AVAILABLE:
            errors.append("MCP enabled but MCP not available")
        if self.config.a2a.enabled and not A2A_AVAILABLE:
            errors.append("A2A enabled but A2A not available")
        if self.config.telemetry.enabled and not OTEL_AVAILABLE:
            errors.append("Telemetry enabled but OpenTelemetry not available")

        # The active persona must reference an existing profile.
        if self.config.active_persona and self.config.active_persona not in self.config.persona_profiles:
            errors.append(f"Active persona '{self.config.active_persona}' not found in profiles")

        # Checkpoint directory must be creatable; failure is only a warning.
        if self.config.checkpoint.enabled:
            try:
                Path(self.config.checkpoint.checkpoint_dir).mkdir(parents=True, exist_ok=True)
            except Exception as e:
                warnings.append(f"Cannot create checkpoint directory: {e}")

        return {"errors": errors, "warnings": warnings}

    # ===== MAIN BUILD METHOD =====

    async def build(self) -> FlowAgent:
        """Build the production-ready FlowAgent.

        Pipeline: validate config -> resolve API key -> materialize persona ->
        create AgentModelData and FlowAgent -> register variables and tools ->
        set up optional MCP/A2A servers -> initialize session context.

        Returns:
            FlowAgent: The fully assembled agent.

        Raises:
            ValueError: If configuration validation reports errors.
        """

        with Spinner(message=f"Building Agent {self.config.name}", symbols='c'):
            iprint(f"Building production FlowAgent: {self.config.name}")

            # Validate configuration: errors abort the build, warnings are logged.
            validation_issues = self.validate_config()
            if validation_issues["errors"]:
                error_msg = f"Configuration validation failed: {', '.join(validation_issues['errors'])}"
                eprint(error_msg)
                raise ValueError(error_msg)

            # Log warnings
            for warning in validation_issues["warnings"]:
                wprint(f"Configuration warning: {warning}")

            try:
                # 1. Setup API configuration
                api_key = None
                if self.config.api_key_env_var:
                    api_key = os.getenv(self.config.api_key_env_var)
                    if not api_key:
                        wprint(f"API key env var {self.config.api_key_env_var} not set")

                # 2. Create persona if configured
                active_persona = None
                if self.config.active_persona and self.config.active_persona in self.config.persona_profiles:
                    # Work on a shallow copy: popping 'format_config' below must
                    # not mutate the profile stored in self.config, otherwise a
                    # second build() would silently lose the format settings.
                    # (Fix: the original popped from the shared config dict.)
                    persona_data = dict(self.config.persona_profiles[self.config.active_persona])

                    # Create FormatConfig if present
                    format_config = None
                    if "format_config" in persona_data:
                        fc_data = persona_data.pop("format_config")
                        format_config = FormatConfig(
                            response_format=ResponseFormat(fc_data.get("response_format", "frei-text")),
                            text_length=TextLength(fc_data.get("text_length", "chat-conversation")),
                            custom_instructions=fc_data.get("custom_instructions", ""),
                            strict_format_adherence=fc_data.get("strict_format_adherence", True),
                            quality_threshold=fc_data.get("quality_threshold", 0.7)
                        )

                    active_persona = PersonaConfig(**persona_data)
                    active_persona.format_config = format_config

                    iprint(f"Using persona: {active_persona.name}")

                # 3. Create AgentModelData
                amd = AgentModelData(
                    name=self.config.name,
                    fast_llm_model=self.config.fast_llm_model,
                    complex_llm_model=self.config.complex_llm_model,
                    system_message=self.config.system_message,
                    temperature=self.config.temperature,
                    max_tokens=self.config.max_tokens_output,
                    max_input_tokens=self.config.max_tokens_input,
                    api_key=api_key,
                    budget_manager=self._budget_manager,
                    persona=active_persona,
                    use_fast_response=self.config.use_fast_response
                )

                # 4. Create FlowAgent
                agent = FlowAgent(
                    amd=amd,
                    world_model=self.config.initial_world_model.copy(),
                    verbose=self.config.verbose_logging,
                    enable_pause_resume=self.config.checkpoint.enabled,
                    checkpoint_interval=self.config.checkpoint.interval_seconds,
                    max_parallel_tasks=self.config.max_parallel_tasks
                )

                # 5. Add custom variables
                for key, value in self.config.custom_variables.items():
                    agent.set_variable(key, value)

                # 6. Add custom tools (failures are logged, not fatal)
                tools_added = 0
                for tool_name, (tool_func, tool_description) in self._custom_tools.items():
                    try:
                        await agent.add_tool(tool_func, tool_name, tool_description)
                        tools_added += 1
                    except Exception as e:
                        eprint(f"Failed to add tool {tool_name}: {e}")

                with Spinner(message="Loading MCP", symbols='w'):
                    # 6a. Process MCP configuration if needed
                    if hasattr(self, '_mcp_needs_loading') and self._mcp_needs_loading:
                        await self._process_mcp_config()

                # 7. Add MCP tools
                for tool_name, tool_info in self._mcp_tools.items():
                    try:
                        await agent.add_tool(
                            tool_info['function'],
                            tool_name,
                            tool_info['description']
                        )
                        tools_added += 1
                    except Exception as e:
                        eprint(f"Failed to add MCP tool {tool_name}: {e}")

                # Hand the session manager over so the agent owns MCP sessions.
                agent._mcp_session_manager = self._mcp_session_manager

                # 8. Setup MCP server
                if self.config.mcp.enabled and MCP_AVAILABLE:
                    try:
                        agent.setup_mcp_server(
                            host=self.config.mcp.host,
                            port=self.config.mcp.port,
                            name=self.config.mcp.server_name
                        )
                        iprint("MCP server configured")
                    except Exception as e:
                        eprint(f"Failed to setup MCP server: {e}")

                # 9. Setup A2A server
                if self.config.a2a.enabled and A2A_AVAILABLE:
                    try:
                        agent.setup_a2a_server(
                            host=self.config.a2a.host,
                            port=self.config.a2a.port
                        )
                        iprint("A2A server configured")
                    except Exception as e:
                        eprint(f"Failed to setup A2A server: {e}")

                # 10. Initialize enhanced session context (best-effort)
                try:
                    await agent.initialize_session_context(max_history=200)
                    iprint("Enhanced session context initialized")
                except Exception as e:
                    wprint(f"Session context initialization failed: {e}")


                # Final summary
                iprint("ok FlowAgent built successfully!")
                iprint(f"   Agent: {agent.amd.name}")
                iprint(f"   Tools: {tools_added}")
                iprint(f"   MCP: {'ok' if self.config.mcp.enabled else 'F'}")
                iprint(f"   A2A: {'ok' if self.config.a2a.enabled else 'F'}")
                iprint(f"   Telemetry: {'ok' if self.config.telemetry.enabled else 'F'}")
                iprint(f"   Checkpoints: {'ok' if self.config.checkpoint.enabled else 'F'}")
                iprint(f"   Persona: {active_persona.name if active_persona else 'Default'}")

                return agent

            except Exception as e:
                eprint(f"Failed to build FlowAgent: {e}")
                raise

    # ===== FACTORY METHODS =====

    @classmethod
    def create_developer_agent(cls, name: str = "DeveloperAgent",
                               with_mcp: bool = True, with_a2a: bool = False) -> 'FlowAgentBuilder':
        """Create a pre-configured developer agent builder."""
        builder = cls().with_name(name).with_developer_persona()
        builder.with_checkpointing(enabled=True, interval_seconds=300)
        builder.verbose(True)

        # Optional integrations on their default developer ports.
        if with_mcp:
            builder.enable_mcp_server(port=8001)
        if with_a2a:
            builder.enable_a2a_server(port=5001)

        return builder

    @classmethod
    def create_analyst_agent(cls, name: str = "AnalystAgent",
                             with_telemetry: bool = True) -> 'FlowAgentBuilder':
        """Create a pre-configured data-analyst agent builder."""
        builder = cls().with_name(name).with_analyst_persona()
        builder.with_checkpointing(enabled=True)
        builder.verbose(False)

        if with_telemetry:
            builder.enable_telemetry(console_export=True)

        return builder

    @classmethod
    def create_general_assistant(cls, name: str = "AssistantAgent",
                                 full_integration: bool = True) -> 'FlowAgentBuilder':
        """Create a general-purpose assistant builder, optionally fully integrated."""
        builder = cls().with_name(name).with_assistant_persona()
        builder.with_checkpointing(enabled=True)

        # Full integration turns on every optional subsystem.
        if full_integration:
            builder.enable_mcp_server()
            builder.enable_a2a_server()
            builder.enable_telemetry()

        return builder

    @classmethod
    def create_creative_agent(cls, name: str = "CreativeAgent") -> 'FlowAgentBuilder':
        """Create a creative assistant agent builder."""
        builder = cls().with_name(name).with_creative_persona()
        # Creative work benefits from a higher sampling temperature.
        builder.with_temperature(0.8)
        return builder.with_checkpointing(enabled=True)

    @classmethod
    def create_executive_agent(cls, name: str = "ExecutiveAgent",
                               with_integrations: bool = True) -> 'FlowAgentBuilder':
        """Create an executive assistant agent builder."""
        builder = cls().with_name(name).with_executive_persona()
        builder.with_checkpointing(enabled=True)

        if with_integrations:
            builder.enable_a2a_server()  # delegation to other agents via A2A
            builder.enable_telemetry()   # executive reporting needs metrics

        return builder
__init__(config=None, config_path=None)

Initialize builder with configuration

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
def __init__(self, config: AgentConfig = None, config_path: str = None):
    """Initialize builder with configuration"""

    if config and config_path:
        raise ValueError("Provide either config object or config_path, not both")

    if config_path:
        self.config = self.load_config(config_path)
    elif config:
        self.config = config
    else:
        self.config = AgentConfig()

    # Runtime components
    self._custom_tools: dict[str, tuple[Callable, str]] = {}
    self._mcp_tools: dict[str, dict] = {}
    from toolboxv2.mods.isaa.extras.mcp_session_manager import MCPSessionManager

    self._mcp_session_manager = MCPSessionManager()

    self._budget_manager: BudgetManager = None
    self._tracer_provider: TracerProvider = None
    self._a2a_server: Any = None

    # Set logging level
    if self.config.verbose_logging:
        logging.getLogger().setLevel(logging.DEBUG)

    iprint(f"FlowAgent Builder initialized: {self.config.name}")
add_mcp_tool_from_code(name, code, description='')

Add MCP tool from code string

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
973
974
975
976
977
978
979
980
981
def add_mcp_tool_from_code(self, name: str, code: str, description: str = "") -> 'FlowAgentBuilder':
    """Add MCP tool from code string"""
    tool_config = {
        'name': name,
        'description': description,
        'function_code': code
    }
    self._load_direct_mcp_tool(tool_config)
    return self
add_persona_profile(profile_name, name, style='professional', tone='friendly', personality_traits=None, custom_instructions='', response_format=None, text_length=None)

Add a persona profile with optional format configuration

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
def add_persona_profile(self, profile_name: str, name: str, style: str = "professional",
                        tone: str = "friendly", personality_traits: list[str] = None,
                        custom_instructions: str = "", response_format: str = None,
                        text_length: str = None) -> 'FlowAgentBuilder':
    """Add a persona profile with optional format configuration"""

    if personality_traits is None:
        personality_traits = ["helpful", "concise"]

    # Create persona config
    persona_data = {
        "name": name,
        "style": style,
        "tone": tone,
        "personality_traits": personality_traits,
        "custom_instructions": custom_instructions,
        "apply_method": "system_prompt",
        "integration_level": "light"
    }

    # Add format config if specified
    if response_format or text_length:
        format_config = {
            "response_format": response_format or "frei-text",
            "text_length": text_length or "chat-conversation",
            "custom_instructions": "",
            "strict_format_adherence": True,
            "quality_threshold": 0.7
        }
        persona_data["format_config"] = format_config

    self.config.persona_profiles[profile_name] = persona_data
    iprint(f"Persona profile added: {profile_name}")
    return self
add_tool(func, name=None, description=None)

Add custom tool function

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1055
1056
1057
1058
1059
1060
1061
def add_tool(self, func: Callable, name: str = None, description: str = None) -> 'FlowAgentBuilder':
    """Add custom tool function"""
    tool_name = name or func.__name__
    self._custom_tools[tool_name] = (func, description or func.__doc__)

    iprint(f"Tool added: {tool_name}")
    return self
add_tools_from_module(module, prefix='', exclude=None)

Add all functions from a module as tools

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
def add_tools_from_module(self, module, prefix: str = "", exclude: list[str] = None) -> 'FlowAgentBuilder':
    """Add all functions from a module as tools"""
    exclude = exclude or []

    for name, obj in inspect.getmembers(module, inspect.isfunction):
        if name in exclude or name.startswith('_'):
            continue

        tool_name = f"{prefix}{name}" if prefix else name
        self.add_tool(obj, name=tool_name)

    iprint(f"Added tools from module {module.__name__}")
    return self
build() async

Build the production-ready FlowAgent

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
async def build(self) -> FlowAgent:
    """Build the production-ready FlowAgent.

    Validates the configuration, resolves the API key and active
    persona, assembles AgentModelData, instantiates the FlowAgent and
    wires up custom/MCP tools plus optional MCP/A2A servers.

    Returns:
        The fully configured FlowAgent instance.

    Raises:
        ValueError: if configuration validation reports errors.
        Exception: any underlying build failure is logged and re-raised.
    """

    with Spinner(message=f"Building Agent {self.config.name}", symbols='c'):
        iprint(f"Building production FlowAgent: {self.config.name}")

        # Validate configuration
        validation_issues = self.validate_config()
        if validation_issues["errors"]:
            error_msg = f"Configuration validation failed: {', '.join(validation_issues['errors'])}"
            eprint(error_msg)
            raise ValueError(error_msg)

        # Log warnings (non-fatal, e.g. unwritable checkpoint directory)
        for warning in validation_issues["warnings"]:
            wprint(f"Configuration warning: {warning}")

        try:
            # 1. Setup API configuration
            api_key = None
            if self.config.api_key_env_var:
                api_key = os.getenv(self.config.api_key_env_var)
                if not api_key:
                    wprint(f"API key env var {self.config.api_key_env_var} not set")

            # 2. Create persona if configured
            active_persona = None
            if self.config.active_persona and self.config.active_persona in self.config.persona_profiles:
                # Bug fix: work on a shallow copy — pop("format_config")
                # below would otherwise mutate the stored profile, so a
                # second build() of the same builder silently lost the
                # persona's format configuration.
                persona_data = dict(self.config.persona_profiles[self.config.active_persona])

                # Create FormatConfig if present
                format_config = None
                if "format_config" in persona_data:
                    fc_data = persona_data.pop("format_config")
                    format_config = FormatConfig(
                        response_format=ResponseFormat(fc_data.get("response_format", "frei-text")),
                        text_length=TextLength(fc_data.get("text_length", "chat-conversation")),
                        custom_instructions=fc_data.get("custom_instructions", ""),
                        strict_format_adherence=fc_data.get("strict_format_adherence", True),
                        quality_threshold=fc_data.get("quality_threshold", 0.7)
                    )

                active_persona = PersonaConfig(**persona_data)
                active_persona.format_config = format_config

                iprint(f"Using persona: {active_persona.name}")

            # 3. Create AgentModelData
            amd = AgentModelData(
                name=self.config.name,
                fast_llm_model=self.config.fast_llm_model,
                complex_llm_model=self.config.complex_llm_model,
                system_message=self.config.system_message,
                temperature=self.config.temperature,
                max_tokens=self.config.max_tokens_output,
                max_input_tokens=self.config.max_tokens_input,
                api_key=api_key,
                budget_manager=self._budget_manager,
                persona=active_persona,
                use_fast_response=self.config.use_fast_response
            )

            # 4. Create FlowAgent
            agent = FlowAgent(
                amd=amd,
                world_model=self.config.initial_world_model.copy(),
                verbose=self.config.verbose_logging,
                enable_pause_resume=self.config.checkpoint.enabled,
                checkpoint_interval=self.config.checkpoint.interval_seconds,
                max_parallel_tasks=self.config.max_parallel_tasks
            )

            # 5. Add custom variables
            for key, value in self.config.custom_variables.items():
                agent.set_variable(key, value)

            # 6. Add custom tools (failures are logged, not fatal)
            tools_added = 0
            for tool_name, (tool_func, tool_description) in self._custom_tools.items():
                try:
                    await agent.add_tool(tool_func, tool_name, tool_description)
                    tools_added += 1
                except Exception as e:
                    eprint(f"Failed to add tool {tool_name}: {e}")

            with Spinner(message="Loading MCP", symbols='w'):
                # 6a. Process MCP configuration if needed (deferred from
                # load_mcp_tools_from_config)
                if hasattr(self, '_mcp_needs_loading') and self._mcp_needs_loading:
                    await self._process_mcp_config()

            # 7. Add MCP tools
            for tool_name, tool_info in self._mcp_tools.items():
                try:
                    await agent.add_tool(
                        tool_info['function'],
                        tool_name,
                        tool_info['description']
                    )
                    tools_added += 1
                except Exception as e:
                    eprint(f"Failed to add MCP tool {tool_name}: {e}")

            agent._mcp_session_manager = self._mcp_session_manager

            # 8. Setup MCP server
            if self.config.mcp.enabled and MCP_AVAILABLE:
                try:
                    agent.setup_mcp_server(
                        host=self.config.mcp.host,
                        port=self.config.mcp.port,
                        name=self.config.mcp.server_name
                    )
                    iprint("MCP server configured")
                except Exception as e:
                    eprint(f"Failed to setup MCP server: {e}")

            # 9. Setup A2A server
            if self.config.a2a.enabled and A2A_AVAILABLE:
                try:
                    agent.setup_a2a_server(
                        host=self.config.a2a.host,
                        port=self.config.a2a.port
                    )
                    iprint("A2A server configured")
                except Exception as e:
                    eprint(f"Failed to setup A2A server: {e}")

            # 10. Initialize enhanced session context (best-effort)
            try:
                await agent.initialize_session_context(max_history=200)
                iprint("Enhanced session context initialized")
            except Exception as e:
                wprint(f"Session context initialization failed: {e}")


            # Final summary
            iprint("ok FlowAgent built successfully!")
            iprint(f"   Agent: {agent.amd.name}")
            iprint(f"   Tools: {tools_added}")
            iprint(f"   MCP: {'ok' if self.config.mcp.enabled else 'F'}")
            iprint(f"   A2A: {'ok' if self.config.a2a.enabled else 'F'}")
            iprint(f"   Telemetry: {'ok' if self.config.telemetry.enabled else 'F'}")
            iprint(f"   Checkpoints: {'ok' if self.config.checkpoint.enabled else 'F'}")
            iprint(f"   Persona: {active_persona.name if active_persona else 'Default'}")

            return agent

        except Exception as e:
            eprint(f"Failed to build FlowAgent: {e}")
            raise
create_analyst_agent(name='AnalystAgent', with_telemetry=True) classmethod

Create a pre-configured data analyst agent

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
@classmethod
def create_analyst_agent(cls, name: str = "AnalystAgent",
                         with_telemetry: bool = True) -> 'FlowAgentBuilder':
    """Create a pre-configured data analyst agent"""
    builder = cls()
    builder.with_name(name)
    builder.with_analyst_persona()
    builder.with_checkpointing(enabled=True)
    builder.verbose(False)

    if with_telemetry:
        builder.enable_telemetry(console_export=True)

    return builder
create_creative_agent(name='CreativeAgent') classmethod

Create a creative assistant agent

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1450
1451
1452
1453
1454
1455
1456
1457
@classmethod
def create_creative_agent(cls, name: str = "CreativeAgent") -> 'FlowAgentBuilder':
    """Create a creative assistant agent"""
    builder = cls().with_name(name).with_creative_persona()
    # Higher temperature encourages more varied, creative output.
    builder.with_temperature(0.8)
    builder.with_checkpointing(enabled=True)
    return builder
create_developer_agent(name='DeveloperAgent', with_mcp=True, with_a2a=False) classmethod

Create a pre-configured developer agent

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
@classmethod
def create_developer_agent(cls, name: str = "DeveloperAgent",
                           with_mcp: bool = True, with_a2a: bool = False) -> 'FlowAgentBuilder':
    """Create a pre-configured developer agent"""
    builder = cls()
    builder.with_name(name)
    builder.with_developer_persona()
    builder.with_checkpointing(enabled=True, interval_seconds=300)
    builder.verbose(True)

    if with_mcp:
        builder.enable_mcp_server(port=8001)
    if with_a2a:
        builder.enable_a2a_server(port=5001)

    return builder
create_executive_agent(name='ExecutiveAgent', with_integrations=True) classmethod

Create an executive assistant agent

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
@classmethod
def create_executive_agent(cls, name: str = "ExecutiveAgent",
                           with_integrations: bool = True) -> 'FlowAgentBuilder':
    """Create an executive assistant agent"""
    builder = cls()
    builder.with_name(name).with_executive_persona().with_checkpointing(enabled=True)

    if with_integrations:
        # A2A lets the executive agent delegate to other agents;
        # telemetry provides the metrics such workflows expect.
        builder.enable_a2a_server()
        builder.enable_telemetry()

    return builder
create_general_assistant(name='AssistantAgent', full_integration=True) classmethod

Create a general-purpose assistant with full integration

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
@classmethod
def create_general_assistant(cls, name: str = "AssistantAgent",
                             full_integration: bool = True) -> 'FlowAgentBuilder':
    """Create a general-purpose assistant with full integration"""
    builder = cls().with_name(name)
    builder.with_assistant_persona()
    builder.with_checkpointing(enabled=True)

    if full_integration:
        # Expose every interop surface: MCP, A2A and telemetry.
        builder.enable_mcp_server()
        builder.enable_a2a_server()
        builder.enable_telemetry()

    return builder
enable_a2a_server(host='0.0.0.0', port=5000, agent_name=None, agent_description=None)

Enable A2A server for agent-to-agent communication

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
def enable_a2a_server(self, host: str = "0.0.0.0", port: int = 5000,
                      agent_name: str = None, agent_description: str = None) -> 'FlowAgentBuilder':
    """Enable the A2A server for agent-to-agent communication (fluent).

    Name and description default to the builder's configured values.
    No-op (with a warning) when the A2A stack is unavailable.
    """
    if not A2A_AVAILABLE:
        wprint("A2A not available, cannot enable server")
        return self

    a2a_cfg = self.config.a2a
    a2a_cfg.enabled = True
    a2a_cfg.host = host
    a2a_cfg.port = port
    a2a_cfg.agent_name = agent_name or self.config.name
    a2a_cfg.agent_description = agent_description or self.config.description

    iprint(f"A2A server enabled: {host}:{port}")
    return self
enable_mcp_server(host='0.0.0.0', port=8000, server_name=None)

Enable MCP server

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
303
304
305
306
307
308
309
310
311
312
313
314
315
316
def enable_mcp_server(self, host: str = "0.0.0.0", port: int = 8000,
                      server_name: str = None) -> 'FlowAgentBuilder':
    """Enable the MCP server (fluent).

    The server name defaults to "<agent-name>_MCP". No-op (with a
    warning) when the MCP stack is unavailable.
    """
    if not MCP_AVAILABLE:
        wprint("MCP not available, cannot enable server")
        return self

    mcp_cfg = self.config.mcp
    mcp_cfg.enabled = True
    mcp_cfg.host = host
    mcp_cfg.port = port
    mcp_cfg.server_name = server_name or f"{self.config.name}_MCP"

    iprint(f"MCP server enabled: {host}:{port}")
    return self
enable_telemetry(service_name=None, endpoint=None, console_export=True)

Enable OpenTelemetry tracing

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
def enable_telemetry(self, service_name: str = None, endpoint: str = None,
                     console_export: bool = True) -> 'FlowAgentBuilder':
    """Enable OpenTelemetry tracing (fluent).

    Args:
        service_name: Service name reported in traces; defaults to the
            agent name when omitted.
        endpoint: Optional OTLP endpoint; when set, an OTLP exporter is
            attached in addition to the console exporter.
        console_export: Whether to also print spans to the console.

    Returns:
        The builder (fluent interface).
    """
    if not OTEL_AVAILABLE:
        wprint("OpenTelemetry not available, cannot enable telemetry")
        return self

    self.config.telemetry.enabled = True
    self.config.telemetry.service_name = service_name or self.config.name
    self.config.telemetry.endpoint = endpoint
    self.config.telemetry.console_export = console_export

    # Initialize tracer provider (installed globally for the process)
    self._tracer_provider = TracerProvider()
    trace.set_tracer_provider(self._tracer_provider)

    # Add exporters
    if console_export:
        console_exporter = ConsoleSpanExporter()
        span_processor = BatchSpanProcessor(console_exporter)
        self._tracer_provider.add_span_processor(span_processor)

    if endpoint:
        try:
            otlp_exporter = OTLPSpanExporter(endpoint=endpoint)
            otlp_processor = BatchSpanProcessor(otlp_exporter)
            self._tracer_provider.add_span_processor(otlp_processor)
        except Exception as e:
            wprint(f"Failed to setup OTLP exporter: {e}")

    # Bug fix: log the resolved name (which falls back to the agent
    # name) rather than the raw parameter, which may be None.
    iprint(f"Telemetry enabled for service: {self.config.telemetry.service_name}")
    return self
from_config_file(config_path) classmethod

Create builder from configuration file

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
256
257
258
259
@classmethod
def from_config_file(cls, config_path: str) -> 'FlowAgentBuilder':
    """Construct a builder whose initial state is loaded from *config_path*."""
    builder = cls(config_path=config_path)
    return builder
load_config(config_path)

Load agent configuration from file

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
def load_config(self, config_path: str) -> AgentConfig:
    """Load an agent configuration from a YAML or JSON file.

    Raises:
        FileNotFoundError: when the file does not exist.
        Exception: parse/validation failures are logged and re-raised.
    """
    path = Path(config_path)
    if not path.exists():
        raise FileNotFoundError(f"Config file not found: {config_path}")

    try:
        is_yaml = path.suffix.lower() in ['.yaml', '.yml']
        with open(path, encoding='utf-8') as f:
            data = yaml.safe_load(f) if is_yaml else json.load(f)
        return AgentConfig(**data)
    except Exception as e:
        eprint(f"Failed to load config from {config_path}: {e}")
        raise
load_mcp_tools_from_config(config_path)

Enhanced MCP config loading with automatic session management and full capability extraction

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
def load_mcp_tools_from_config(self, config_path: str | dict) -> 'FlowAgentBuilder':
    """Enhanced MCP config loading with automatic session management and full capability extraction

    Accepts either a path to a YAML/JSON config file or an inline dict.
    Inline dicts are persisted under the app data directory so a stable
    file path exists. Actual tool loading is deferred to build(), which
    checks the _mcp_needs_loading flag and calls _process_mcp_config().

    Args:
        config_path: Path to the MCP config file, or the config itself.

    Returns:
        The builder (fluent interface).

    Raises:
        FileNotFoundError: when a path is given and does not exist.
    """
    if not MCP_AVAILABLE:
        wprint("MCP not available, skipping tool loading")
        return self

    if isinstance(config_path, dict):
        # Inline config: write it to disk so downstream code can treat
        # it exactly like a file-based config.
        mcp_config = config_path
        from toolboxv2 import get_app
        name = self.config.name or "inline_config"
        # NOTE(review): directory is spelled "MPCConfig" — looks like a
        # typo for "MCPConfig", but renaming would orphan previously
        # saved configs; confirm before changing.
        path = Path(get_app().appdata) / "isaa" / "MPCConfig" / f"{name}.json"
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_text(json.dumps(mcp_config, indent=2))
        config_path = path
    else:
        config_path = Path(config_path)
        if not config_path.exists():
            raise FileNotFoundError(f"MCP config not found: {config_path}")

        try:
            with open(config_path, encoding='utf-8') as f:
                if config_path.suffix.lower() in ['.yaml', '.yml']:
                    mcp_config = yaml.safe_load(f)
                else:
                    mcp_config = json.load(f)

        except Exception as e:
            eprint(f"Failed to load MCP config from {config_path}: {e}")
            raise

    # Store config for async processing
    self._mcp_config_data = mcp_config
    self.config.mcp.config_path = str(config_path)

    # Mark for processing during build
    self._mcp_needs_loading = True

    iprint(f"MCP config loaded from {config_path}, will process during build")

    return self
save_config(config_path, format='yaml')

Save current configuration to file

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
def save_config(self, config_path: str, format: str = 'yaml'):
    """Persist the current configuration to *config_path*.

    Args:
        config_path: Destination file; parent directories are created.
        format: 'yaml' (default) or anything else for JSON.
    """
    path = Path(config_path)
    path.parent.mkdir(parents=True, exist_ok=True)

    try:
        data = self.config.model_dump()
        as_yaml = format.lower() == 'yaml'

        with open(path, 'w', encoding='utf-8') as f:
            if as_yaml:
                yaml.dump(data, f, default_flow_style=False, indent=2)
            else:
                json.dump(data, f, indent=2)

        iprint(f"Configuration saved to {config_path}")
    except Exception as e:
        eprint(f"Failed to save config to {config_path}: {e}")
        raise
set_active_persona(profile_name)

Set active persona profile

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1114
1115
1116
1117
1118
1119
1120
1121
def set_active_persona(self, profile_name: str) -> 'FlowAgentBuilder':
    """Activate a previously added persona profile (fluent).

    Unknown profile names are warned about and otherwise ignored.
    """
    if profile_name not in self.config.persona_profiles:
        wprint(f"Persona profile not found: {profile_name}")
        return self

    self.config.active_persona = profile_name
    iprint(f"Active persona set: {profile_name}")
    return self
validate_config()

Validate the current configuration

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
def validate_config(self) -> dict[str, list[str]]:
    """Validate the current configuration.

    Returns:
        A dict with "errors" (fatal) and "warnings" (non-fatal) lists.
    """
    errors: list[str] = []
    warnings: list[str] = []

    # Required model settings
    if not self.config.fast_llm_model:
        errors.append("Fast LLM model not specified")
    if not self.config.complex_llm_model:
        errors.append("Complex LLM model not specified")

    # Feature flags that require an optional dependency to be present
    for enabled, available, message in (
        (self.config.mcp.enabled, MCP_AVAILABLE, "MCP enabled but MCP not available"),
        (self.config.a2a.enabled, A2A_AVAILABLE, "A2A enabled but A2A not available"),
        (self.config.telemetry.enabled, OTEL_AVAILABLE, "Telemetry enabled but OpenTelemetry not available"),
    ):
        if enabled and not available:
            errors.append(message)

    # The active persona must exist among the registered profiles
    active = self.config.active_persona
    if active and active not in self.config.persona_profiles:
        errors.append(f"Active persona '{active}' not found in profiles")

    # Checkpoint directory must be creatable (warning only)
    if self.config.checkpoint.enabled:
        try:
            Path(self.config.checkpoint.checkpoint_dir).mkdir(parents=True, exist_ok=True)
        except Exception as e:
            warnings.append(f"Cannot create checkpoint directory: {e}")

    return {"errors": errors, "warnings": warnings}
verbose(enable=True)

Enable verbose logging

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
294
295
296
297
298
299
def verbose(self, enable: bool = True) -> 'FlowAgentBuilder':
    """Toggle verbose logging; sets the root logger to DEBUG when enabled."""
    self.config.verbose_logging = enable
    if enable:
        root_logger = logging.getLogger()
        root_logger.setLevel(logging.DEBUG)
    return self
with_analyst_persona(name='Data Analyst')

Add and set a pre-built analyst persona

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
def with_analyst_persona(self, name: str = "Data Analyst") -> 'FlowAgentBuilder':
    """Register and activate a pre-built data-analyst persona (fluent)."""
    self.add_persona_profile(
        "analyst",
        name=name,
        style="analytical",
        tone="objective",
        personality_traits=["methodical", "insight_driven", "evidence_based"],
        custom_instructions="Focus on statistical rigor and actionable recommendations.",
        response_format="with-tables",
        text_length="detailed-indepth",
    )
    return self.set_active_persona("analyst")
with_assistant_persona(name='AI Assistant')

Add and set a pre-built general assistant persona

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
def with_assistant_persona(self, name: str = "AI Assistant") -> 'FlowAgentBuilder':
    """Register and activate a pre-built general-assistant persona (fluent)."""
    self.add_persona_profile(
        "assistant",
        name=name,
        style="friendly",
        tone="helpful",
        personality_traits=["helpful", "patient", "clear", "adaptive"],
        custom_instructions="Be helpful and adapt communication to user expertise level.",
        response_format="with-bullet-points",
        text_length="chat-conversation",
    )
    return self.set_active_persona("assistant")
with_budget_manager(max_cost=10.0)

Enable budget management

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
285
286
287
288
289
290
291
292
def with_budget_manager(self, max_cost: float = 10.0) -> 'FlowAgentBuilder':
    """Enable budget management

    Args:
        max_cost: Intended spending cap in dollars.

    Returns:
        The builder (fluent interface).
    """
    if LITELLM_AVAILABLE:
        # NOTE(review): max_cost is only echoed in the log line below;
        # it is never passed to BudgetManager, so no actual spending
        # limit is enforced here — confirm against litellm's
        # BudgetManager API whether it should be applied.
        self._budget_manager = BudgetManager("agent")
        iprint(f"Budget manager enabled: ${max_cost}")
    else:
        wprint("LiteLLM not available, budget manager disabled")
    return self
with_checkpointing(enabled=True, interval_seconds=300, checkpoint_dir='./checkpoints', max_checkpoints=10)

Configure checkpointing

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
def with_checkpointing(self, enabled: bool = True, interval_seconds: int = 300,
                       checkpoint_dir: str = "./checkpoints", max_checkpoints: int = 10) -> 'FlowAgentBuilder':
    """Configure periodic checkpointing (fluent).

    When enabled, the checkpoint directory is created eagerly so the
    first checkpoint cannot fail on a missing path.
    """
    cp = self.config.checkpoint
    cp.enabled = enabled
    cp.interval_seconds = interval_seconds
    cp.checkpoint_dir = checkpoint_dir
    cp.max_checkpoints = max_checkpoints

    if enabled:
        Path(checkpoint_dir).mkdir(parents=True, exist_ok=True)
        iprint(f"Checkpointing enabled: {checkpoint_dir} (every {interval_seconds}s)")

    return self
with_creative_persona(name='Creative Assistant')

Add and set a pre-built creative persona

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
def with_creative_persona(self, name: str = "Creative Assistant") -> 'FlowAgentBuilder':
    """Register and activate a pre-built creative persona (fluent)."""
    self.add_persona_profile(
        "creative",
        name=name,
        style="creative",
        tone="inspiring",
        personality_traits=["imaginative", "expressive", "innovative", "engaging"],
        custom_instructions="Think outside the box and provide creative, inspiring solutions.",
        response_format="md-text",
        text_length="detailed-indepth",
    )
    return self.set_active_persona("creative")
with_custom_variables(variables)

Add custom variables

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1200
1201
1202
1203
def with_custom_variables(self, variables: dict[str, Any]) -> 'FlowAgentBuilder':
    """Merge *variables* into the agent's custom variable map (fluent)."""
    for var_name, var_value in variables.items():
        self.config.custom_variables[var_name] = var_value
    return self
with_developer_persona(name='Senior Developer')

Add and set a pre-built developer persona

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
def with_developer_persona(self, name: str = "Senior Developer") -> 'FlowAgentBuilder':
    """Register and activate a pre-built senior-developer persona (fluent)."""
    self.add_persona_profile(
        "developer",
        name=name,
        style="technical",
        tone="professional",
        personality_traits=["precise", "thorough", "security_conscious", "best_practices"],
        custom_instructions="Focus on code quality, maintainability, and security. Always consider edge cases.",
        response_format="code-structure",
        text_length="detailed-indepth",
    )
    return self.set_active_persona("developer")
with_executive_persona(name='Executive Assistant')

Add and set a pre-built executive persona

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
def with_executive_persona(self, name: str = "Executive Assistant") -> 'FlowAgentBuilder':
    """Register and activate a pre-built executive persona (fluent)."""
    self.add_persona_profile(
        "executive",
        name=name,
        style="professional",
        tone="authoritative",
        personality_traits=["strategic", "decisive", "results_oriented", "efficient"],
        custom_instructions="Provide strategic insights with executive-level clarity and focus on outcomes.",
        response_format="with-bullet-points",
        text_length="table-conversation",
    )
    return self.set_active_persona("executive")
with_models(fast_model, complex_model=None)

Set LLM models

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
268
269
270
271
272
273
def with_models(self, fast_model: str, complex_model: str = None) -> 'FlowAgentBuilder':
    """Set the fast LLM model; update the complex one only when given (fluent)."""
    self.config.fast_llm_model = fast_model
    if not complex_model:
        return self
    self.config.complex_llm_model = complex_model
    return self
with_name(name)

Set agent name

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
263
264
265
266
def with_name(self, name: str) -> 'FlowAgentBuilder':
    """Assign the agent's display name (fluent)."""
    self.config.name = name
    return self
with_system_message(message)

Set system message

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
275
276
277
278
def with_system_message(self, message: str) -> 'FlowAgentBuilder':
    """Store the system prompt used by the agent (fluent)."""
    self.config.system_message = message
    return self
with_temperature(temp)

Set temperature

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
280
281
282
283
def with_temperature(self, temp: float) -> 'FlowAgentBuilder':
    """Record the sampling temperature for LLM calls (fluent)."""
    self.config.temperature = temp
    return self
with_world_model(world_model)

Set initial world model

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1205
1206
1207
1208
def with_world_model(self, world_model: dict[str, Any]) -> 'FlowAgentBuilder':
    """Merge the given entries into the initial world model (fluent)."""
    for fact_key, fact_value in world_model.items():
        self.config.initial_world_model[fact_key] = fact_value
    return self
MCPConfig

Bases: BaseModel

MCP server and tools configuration

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
87
88
89
90
91
92
93
94
95
96
97
class MCPConfig(BaseModel):
    """MCP server and tools configuration"""
    model_config = ConfigDict(arbitrary_types_allowed=True)

    enabled: bool = False
    # Optional path to an MCP tools config file (YAML or JSON).
    # Typed as optional — the previous `str = None` annotation lied to
    # pydantic about None being a valid value.
    config_path: str | None = None
    # Display name; the builder defaults this to "<agent>_MCP" when
    # enabling the server.
    server_name: str | None = None
    host: str = "0.0.0.0"
    port: int = 8000
    auto_expose_tools: bool = True
    tools_from_config: list[dict[str, Any]] = Field(default_factory=list)
TelemetryConfig

Bases: BaseModel

OpenTelemetry configuration

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
113
114
115
116
117
118
119
120
class TelemetryConfig(BaseModel):
    """OpenTelemetry configuration"""
    enabled: bool = False
    # Service name reported in traces; the builder defaults this to the
    # agent name. Typed as optional — `str = None` mis-declared None.
    service_name: str | None = None
    endpoint: str | None = None  # OTLP endpoint URL
    console_export: bool = True
    # NOTE(review): batch_export and sample_rate are not read by
    # enable_telemetry() in this file — confirm they are consumed
    # elsewhere or wire them up.
    batch_export: bool = True
    sample_rate: float = 1.0
detect_shell()

Detects the best available shell and the argument to execute a command. Returns: A tuple of (shell_executable, command_argument). e.g., ('/bin/bash', '-c') or ('powershell.exe', '-Command')

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
def detect_shell() -> tuple[str, str]:
    """
    Detect the most capable shell available on this system.

    Returns:
        A tuple of (shell_executable, command_argument),
        e.g. ('/bin/bash', '-c') or ('powershell.exe', '-Command').
    """
    if platform.system() == "Windows":
        # Prefer PowerShell Core, then Windows PowerShell, then cmd.
        for candidate in ("pwsh", "powershell"):
            found = shutil.which(candidate)
            if found:
                return found, "-Command"
        return "cmd.exe", "/c"

    # POSIX: honor $SHELL when it resolves to a real executable.
    preferred = os.environ.get("SHELL")
    if preferred and shutil.which(preferred):
        return preferred, "-c"

    for candidate in ("bash", "zsh", "sh"):
        found = shutil.which(candidate)
        if found:
            return found, "-c"

    # Last resort: assume a Bourne shell exists at the standard path.
    return "/bin/sh", "-c"
example_production_usage() async

Production usage example with full features

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
async def example_production_usage():
    """Production usage example with full features.

    Demonstrates three end-to-end flows:
      1. A developer agent with MCP + A2A + telemetry and a custom tool.
      2. Saving a builder config to YAML and rebuilding an agent from it.
      3. Loading MCP tools from a JSON config file.

    NOTE(review): performs live agent runs (a_run) and writes to /tmp
    (POSIX-only paths) — this is a runnable demo, not a unit test.
    """

    iprint("=== Production FlowAgent Builder Example ===")

    # Example 1: Developer agent with full MCP integration
    iprint("Creating developer agent with MCP integration...")

    # Add a custom tool
    def get_system_info():
        """Get basic system information"""
        import platform
        return {
            "platform": platform.platform(),
            "python_version": platform.python_version(),
            "architecture": platform.architecture()
        }

    developer_agent = await (FlowAgentBuilder
                             .create_developer_agent("ProductionDev", with_mcp=True, with_a2a=True)
                             .add_tool(get_system_info, "get_system_info", "Get system information")
                             .enable_telemetry(console_export=True)
                             .with_custom_variables({
        "project_name": "FlowAgent Production",
        "environment": "production"
    })
                             .build())

    # Test the developer agent
    # ({{ project_name }} is presumably substituted from the custom
    # variables registered above — confirm against FlowAgent templating)
    dev_response = await developer_agent.a_run(
        "Hello! I'm working on {{ project_name }}. Can you tell me about the system and create a simple Python function?"
    )
    iprint(f"Developer agent response: {dev_response[:200]}...")

    # Example 2: Load from configuration file
    iprint("\nTesting configuration save/load...")

    # Save current config
    config_path = "/tmp/production_agent_config.yaml"
    builder = FlowAgentBuilder.create_analyst_agent("ConfigTestAgent")
    builder.save_config(config_path)

    # Load from config
    loaded_builder = FlowAgentBuilder.from_config_file(config_path)
    config_agent = await loaded_builder.build()

    config_response = await config_agent.a_run("Analyze this data: [1, 2, 3, 4, 5]")
    iprint(f"Config-loaded agent response: {config_response[:150]}...")

    # Example 3: Agent with MCP tools from config
    iprint("\nTesting MCP tools integration...")

    # Create a sample MCP config
    # (function_code holds tool source text; presumably compiled by the
    # MCP loader during build — confirm in _process_mcp_config)
    mcp_config = {
        "tools": [
            {
                "name": "weather_checker",
                "description": "Check weather for a location",
                "function_code": '''
async def weather_checker(location: str) -> str:
    """Mock weather checker"""
    import random
    conditions = ["sunny", "cloudy", "rainy", "snowy"]
    temp = random.randint(-10, 35)
    condition = random.choice(conditions)
    return f"Weather in {location}: {condition}, {temp}°C"
'''
            }
        ]
    }

    mcp_config_path = "/tmp/mcp_tools_config.json"
    with open(mcp_config_path, 'w') as f:
        json.dump(mcp_config, f, indent=2)

    mcp_agent = await (FlowAgentBuilder()
                       .with_name("MCPTestAgent")
                       .with_assistant_persona()
                       .enable_mcp_server(port=8002)
                       .load_mcp_tools_from_config(mcp_config_path)
                       .build())

    mcp_response = await mcp_agent.a_run("What's the weather like in Berlin?")
    iprint(f"MCP agent response: {mcp_response[:150]}...")

    # Show agent status
    iprint("\n=== Agent Status ===")
    status = developer_agent.status(pretty_print=False)
    iprint(f"Developer agent tools: {len(status['capabilities']['tool_names'])}")
    iprint(f"MCP agent tools: {len(mcp_agent.shared.get('available_tools', []))}")

    # Cleanup: release agent resources (sessions, servers)
    await developer_agent.close()
    await config_agent.close()
    await mcp_agent.close()

    iprint("Production example completed successfully!")
example_quick_start() async

Quick start examples for common scenarios

Source code in toolboxv2/mods/isaa/base/Agent/builder.py
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
async def example_quick_start():
    """Quick start examples for common scenarios"""

    iprint("=== Quick Start Examples ===")

    # 1. Simple developer agent
    dev = await FlowAgentBuilder.create_developer_agent("QuickDev").build()
    answer = await dev.a_run("Create a Python function to validate email addresses")
    iprint(f"Quick dev response: {answer[:100]}...")
    await dev.close()

    # 2. Analyst with custom data
    analyst_builder = FlowAgentBuilder.create_analyst_agent("QuickAnalyst")
    analyst_builder = analyst_builder.with_custom_variables({"dataset": "sales_data_2024"})
    analyst = await analyst_builder.build()
    answer = await analyst.a_run("Analyze the trends in {{ dataset }}")
    iprint(f"Quick analyst response: {answer[:100]}...")
    await analyst.close()

    # 3. Creative assistant
    creative = await FlowAgentBuilder.create_creative_agent("QuickCreative").build()
    answer = await creative.a_run("Write a creative story about AI agents collaborating")
    iprint(f"Quick creative response: {answer[:100]}...")
    await creative.close()

    iprint("Quick start examples completed!")
chain
CF

Chain Format - handles formatting and data extraction between tasks.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
class CF:
    """Chain Format - handles formatting and data extraction between tasks."""

    def __init__(self, format_class: type[BaseModel]):
        # Pydantic model used to shape a task's output.
        self.format_class = format_class
        # Key (or tuple of keys) to pull out of the formatted result; None = no extraction.
        self.extract_key: str | tuple | None = None
        # True when a '[n]' suffix requests per-item parallel fan-out.
        self.is_parallel_extraction = False

    def __sub__(self, key: str | tuple):
        """Implements the - operator for data extraction keys."""
        # Return a configured shallow copy so the original CF stays reusable.
        configured = copy.copy(self)
        if isinstance(key, tuple):
            configured.extract_key = key
        elif isinstance(key, str):
            if '[n]' not in key:
                configured.extract_key = key
            else:
                # '[n]' marks the key as a list to be processed in parallel.
                configured.extract_key = key.replace('[n]', '')
                configured.is_parallel_extraction = True
        return configured
__sub__(key)

Implements the - operator for data extraction keys.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
24
25
26
27
28
29
30
31
32
33
34
35
def __sub__(self, key: str | tuple):
    """Implements the - operator for data extraction keys."""
    # Work on a shallow copy so the original CF instance stays reusable.
    configured = copy.copy(self)
    if isinstance(key, tuple):
        configured.extract_key = key
    elif isinstance(key, str):
        if '[n]' not in key:
            configured.extract_key = key
        else:
            # '[n]' marks the extracted list for parallel fan-out.
            configured.extract_key = key.replace('[n]', '')
            configured.is_parallel_extraction = True
    return configured
Chain

Bases: ChainBase

The main class for creating and executing sequential chains of tasks.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
class Chain(ChainBase):
    """The main class for creating and executing sequential chains of tasks."""

    def __init__(self, agent: 'FlowAgent' = None):
        # Ordered task list: agents, CF markers, IS conditions or nested chains.
        self.tasks: list[Any] = [agent] if agent else []
        self.progress_tracker: ProgressTracker | None = None

    @classmethod
    def _create_chain(cls, components: list[Any]) -> 'Chain':
        """Alternate constructor: wrap a pre-assembled component list in a Chain."""
        chain = cls()
        chain.tasks = components
        return chain

    def _extract_data(self, data: dict, cf: CF) -> Any:
        """Extracts data from a dictionary based on the CF configuration."""
        if not isinstance(data, dict):
            return data

        key = cf.extract_key
        if key == '*':
            return data
        if isinstance(key, tuple):
            # Project a sub-dict containing only the requested keys.
            return {k: data.get(k) for k in key if k in data}
        if isinstance(key, str) and key in data:
            return data[key]
        return data  # Return original data if key not found

    async def a_run(self, query: Any, **kwargs):
        """
        Executes the chain of tasks asynchronously with dynamic method selection,
        data extraction, and auto-parallelization.
        """
        current_data = query

        # We need to iterate with an index to look ahead
        i = 0
        while i < len(self.tasks):
            task = self.tasks[i]

            # --- Auto-detection and execution ---
            if hasattr(task, 'a_run') and hasattr(task, 'a_format_class'):
                next_task = self.tasks[i + 1] if (i + 1) < len(self.tasks) else None
                task.active_session = kwargs.get("session_id", "default")
                # Dynamic decision: call a_format_class or a_run?
                if isinstance(next_task, CF):
                    # Next element is a formatting step, so call a_format_class
                    current_data = await task.a_format_class(
                        next_task.format_class, str(current_data), **kwargs
                    )
                else:
                    # Standard execution
                    current_data = await task.a_run(str(current_data), **kwargs)
                task.active_session = None

            elif isinstance(task, CF):
                # --- Auto-extraction and parallelization ---
                if task.extract_key:
                    extracted_data = self._extract_data(current_data, task)

                    if task.is_parallel_extraction and isinstance(extracted_data, list):
                        next_task_for_parallel = self.tasks[i + 1] if (i + 1) < len(self.tasks) else None
                        if next_task_for_parallel:
                            # Run the following task once per extracted item, in parallel.
                            # (Fixed: removed an unused throwaway ParallelChain that was
                            # constructed here but never executed.)
                            parallel_tasks = [
                                next_task_for_parallel.a_run(item, **kwargs) for item in extracted_data
                            ]
                            current_data = await asyncio.gather(*parallel_tasks)

                            # Skip the next task since it was already executed in parallel
                            i += 1
                        else:
                            current_data = extracted_data
                    else:
                        current_data = extracted_data
                else:
                    # No extraction, data stays unchanged (CF serves only as a marker)
                    pass

            elif isinstance(task, ParallelChain | ConditionalChain | ErrorHandlingChain):
                current_data = await task.a_run(current_data, **kwargs)

            elif isinstance(task, IS):
                # IS needs to be paired with >> to form a ConditionalChain
                next_task_for_cond = self.tasks[i + 1] if (i + 1) < len(self.tasks) else None
                if next_task_for_cond:
                    # Form a conditional chain on the fly
                    conditional_task = ConditionalChain(task, next_task_for_cond)
                    # Check for a false branch defined with %
                    next_next_task = self.tasks[i + 2] if (i + 2) < len(self.tasks) else None
                    if isinstance(next_next_task, ConditionalChain) and next_next_task.false_branch:
                        conditional_task.false_branch = next_next_task.false_branch
                        i += 1  # also skip the false branch marker

                    current_data = await conditional_task.a_run(current_data, **kwargs)
                    i += 1  # Skip the next task as it's part of the conditional
                else:
                    raise ValueError("IS condition must be followed by a task to execute.")

            i += 1  # Move on to the next task

        return current_data
a_run(query, **kwargs) async

Executes the chain of tasks asynchronously with dynamic method selection, data extraction, and auto-parallelization.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
async def a_run(self, query: Any, **kwargs):
    """
    Executes the chain of tasks asynchronously with dynamic method selection,
    data extraction, and auto-parallelization.
    """
    current_data = query

    # We need to iterate with an index to look ahead
    i = 0
    while i < len(self.tasks):
        task = self.tasks[i]

        # --- Auto-detection and execution ---
        if hasattr(task, 'a_run') and hasattr(task, 'a_format_class'):
            next_task = self.tasks[i + 1] if (i + 1) < len(self.tasks) else None
            task.active_session = kwargs.get("session_id", "default")
            # Dynamic decision: call a_format_class or a_run?
            if isinstance(next_task, CF):
                # Next element is a formatting step, so call a_format_class
                current_data = await task.a_format_class(
                    next_task.format_class, str(current_data), **kwargs
                )
            else:
                # Standard execution
                current_data = await task.a_run(str(current_data), **kwargs)
            task.active_session = None

        elif isinstance(task, CF):
            # --- Auto-extraction and parallelization ---
            if task.extract_key:
                extracted_data = self._extract_data(current_data, task)

                if task.is_parallel_extraction and isinstance(extracted_data, list):
                    next_task_for_parallel = self.tasks[i + 1] if (i + 1) < len(self.tasks) else None
                    if next_task_for_parallel:
                        # Run the following task once per extracted item, in parallel.
                        # (Fixed: removed an unused throwaway ParallelChain that was
                        # constructed here but never executed.)
                        parallel_tasks = [
                            next_task_for_parallel.a_run(item, **kwargs) for item in extracted_data
                        ]
                        current_data = await asyncio.gather(*parallel_tasks)

                        # Skip the next task since it was already executed in parallel
                        i += 1
                    else:
                        current_data = extracted_data
                else:
                    current_data = extracted_data
            else:
                # No extraction, data stays unchanged (CF serves only as a marker)
                pass

        elif isinstance(task, ParallelChain | ConditionalChain | ErrorHandlingChain):
            current_data = await task.a_run(current_data, **kwargs)

        elif isinstance(task, IS):
            # IS needs to be paired with >> to form a ConditionalChain
            next_task_for_cond = self.tasks[i + 1] if (i + 1) < len(self.tasks) else None
            if next_task_for_cond:
                # Form a conditional chain on the fly
                conditional_task = ConditionalChain(task, next_task_for_cond)
                # Check for a false branch defined with %
                next_next_task = self.tasks[i + 2] if (i + 2) < len(self.tasks) else None
                if isinstance(next_next_task, ConditionalChain) and next_next_task.false_branch:
                    conditional_task.false_branch = next_next_task.false_branch
                    i += 1  # also skip the false branch marker

                current_data = await conditional_task.a_run(current_data, **kwargs)
                i += 1  # Skip the next task as it's part of the conditional
            else:
                raise ValueError("IS condition must be followed by a task to execute.")

        i += 1  # Move on to the next task

    return current_data
ChainBase

Abstract base class for all chain types, providing common operators.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
class ChainBase:
    """Abstract base class for all chain types, providing common operators."""

    def __rshift__(self, other: Any) -> 'Chain':
        """Implements the >> operator to chain tasks sequentially."""
        if not isinstance(self, Chain):
            return Chain._create_chain([self, other])
        # Extend an existing chain without mutating it.
        return Chain._create_chain(self.tasks + [other])

    def __add__(self, other: Any) -> 'ParallelChain':
        """Implements the + operator for parallel execution."""
        branches = [self, other]
        return ParallelChain(branches)

    def __and__(self, other: Any) -> 'ParallelChain':
        """Implements the & operator, an alias for parallel execution."""
        combined = [self, other]
        return ParallelChain(combined)

    def __or__(self, other: Any) -> 'ErrorHandlingChain':
        """Implements the | operator for defining a fallback/error handling path."""
        return ErrorHandlingChain(self, other)

    def __mod__(self, other: Any) -> 'ConditionalChain':
        """Implements the % operator for defining a false/else branch in a condition."""
        if not isinstance(self, ConditionalChain):
            # Allows creating a conditional chain directly.
            return ConditionalChain(None, self, other)
        # Typically used after a conditional chain: attach the else-branch.
        self.false_branch = other
        return self

    def set_progress_callback(self, progress_tracker: 'ProgressTracker'):
        """Recursively sets the progress callback for all tasks in the chain."""
        children: list[Any] = []
        children.extend(getattr(self, 'tasks', []))    # Chain
        children.extend(getattr(self, 'agents', []))   # ParallelChain
        if hasattr(self, 'true_branch'):               # ConditionalChain
            children.append(self.true_branch)
        if getattr(self, 'false_branch', None):        # ConditionalChain else-branch
            children.append(self.false_branch)
        if hasattr(self, 'primary'):                   # ErrorHandlingChain
            children.append(self.primary)
        if hasattr(self, 'fallback'):                  # ErrorHandlingChain
            children.append(self.fallback)

        for child in children:
            if hasattr(child, 'set_progress_callback'):
                child.set_progress_callback(progress_tracker)

    def __call__(self, *args, **kwargs):
        """Allows the chain to be called like a function, returning an awaitable runner."""
        return self._Runner(self, args, kwargs)

    class _Runner:
        def __init__(self, parent, args, kwargs):
            self.parent = parent
            self.args = args
            self.kwargs = kwargs

        def __call__(self):
            """Synchronous execution."""
            coro = self.parent.a_run(*self.args, **self.kwargs)
            return asyncio.run(coro)

        def __await__(self):
            """Asynchronous execution."""
            coro = self.parent.a_run(*self.args, **self.kwargs)
            return coro.__await__()
__add__(other)

Implements the + operator for parallel execution.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
58
59
60
def __add__(self, other: Any) -> 'ParallelChain':
    """Implements the + operator for parallel execution."""
    branches = [self, other]
    return ParallelChain(branches)
__and__(other)

Implements the & operator, an alias for parallel execution.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
62
63
64
def __and__(self, other: Any) -> 'ParallelChain':
    """Implements the & operator, an alias for parallel execution."""
    combined = [self, other]
    return ParallelChain(combined)
__call__(*args, **kwargs)

Allows the chain to be called like a function, returning an awaitable runner.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
94
95
96
def __call__(self, *args, **kwargs):
    """Allows the chain to be called like a function, returning an awaitable runner."""
    runner_cls = self._Runner
    return runner_cls(self, args, kwargs)
__mod__(other)

Implements the % operator for defining a false/else branch in a condition.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
70
71
72
73
74
75
76
77
def __mod__(self, other: Any) -> 'ConditionalChain':
    """Implements the % operator for defining a false/else branch in a condition."""
    if not isinstance(self, ConditionalChain):
        # Allows creating a conditional chain directly.
        return ConditionalChain(None, self, other)
    # Typically used after a conditional chain: attach the else-branch.
    self.false_branch = other
    return self
__or__(other)

Implements the | operator for defining a fallback/error handling path.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
66
67
68
def __or__(self, other: Any) -> 'ErrorHandlingChain':
    """Implements the | operator for defining a fallback/error handling path."""
    primary, fallback = self, other
    return ErrorHandlingChain(primary, fallback)
__rshift__(other)

Implements the >> operator to chain tasks sequentially.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
51
52
53
54
55
56
def __rshift__(self, other: Any) -> 'Chain':
    """Implements the >> operator to chain tasks sequentially."""
    if not isinstance(self, Chain):
        return Chain._create_chain([self, other])
    # Extend an existing chain without mutating it.
    extended = self.tasks + [other]
    return Chain._create_chain(extended)
set_progress_callback(progress_tracker)

Recursively sets the progress callback for all tasks in the chain.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
79
80
81
82
83
84
85
86
87
88
89
90
91
92
def set_progress_callback(self, progress_tracker: 'ProgressTracker'):
    """Recursively sets the progress callback for all tasks in the chain."""
    children = []
    children.extend(getattr(self, 'tasks', []))    # Chain
    children.extend(getattr(self, 'agents', []))   # ParallelChain
    if hasattr(self, 'true_branch'):               # ConditionalChain
        children.append(self.true_branch)
    if getattr(self, 'false_branch', None):        # ConditionalChain else-branch
        children.append(self.false_branch)
    if hasattr(self, 'primary'):                   # ErrorHandlingChain
        children.append(self.primary)
    if hasattr(self, 'fallback'):                  # ErrorHandlingChain
        children.append(self.fallback)

    for child in children:
        if hasattr(child, 'set_progress_callback'):
            child.set_progress_callback(progress_tracker)
ConditionalChain

Bases: ChainBase

Handles conditional execution based on a condition.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
class ConditionalChain(ChainBase):
    """Handles conditional execution based on a condition."""

    def __init__(self, condition: IS, true_branch: Any, false_branch: Any = None):
        self.condition = condition
        self.true_branch = true_branch
        self.false_branch = false_branch

    async def a_run(self, data: Any, **kwargs):
        """Executes the true or false branch based on the condition."""
        # The condition holds only for dict input whose key matches the expected value.
        matched = (
            isinstance(self.condition, IS)
            and isinstance(data, dict)
            and data.get(self.condition.key) == self.condition.expected_value
        )
        if matched:
            return await self.true_branch.a_run(data, **kwargs)
        if self.false_branch:
            return await self.false_branch.a_run(data, **kwargs)
        # No branch applies: pass the data through unchanged.
        return data
a_run(data, **kwargs) async

Executes the true or false branch based on the condition.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
140
141
142
143
144
145
146
147
148
149
150
151
async def a_run(self, data: Any, **kwargs):
    """Executes the true or false branch based on the condition."""
    # The condition holds only for dict input whose key matches the expected value.
    matched = (
        isinstance(self.condition, IS)
        and isinstance(data, dict)
        and data.get(self.condition.key) == self.condition.expected_value
    )
    if matched:
        return await self.true_branch.a_run(data, **kwargs)
    if self.false_branch:
        return await self.false_branch.a_run(data, **kwargs)
    # No branch applies: pass the data through unchanged.
    return data
ErrorHandlingChain

Bases: ChainBase

Handles exceptions in a primary chain by executing a fallback chain.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
154
155
156
157
158
159
160
161
162
163
164
165
166
167
class ErrorHandlingChain(ChainBase):
    """Handles exceptions in a primary chain by executing a fallback chain."""

    def __init__(self, primary: Any, fallback: Any):
        self.primary = primary
        self.fallback = fallback

    async def a_run(self, query: Any, **kwargs):
        """Tries the primary chain and executes the fallback on failure."""
        try:
            result = await self.primary.a_run(query, **kwargs)
        except Exception as e:
            print(f"Primary chain failed with error: {e}. Running fallback.")
            return await self.fallback.a_run(query, **kwargs)
        return result
a_run(query, **kwargs) async

Tries the primary chain and executes the fallback on failure.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
161
162
163
164
165
166
167
async def a_run(self, query: Any, **kwargs):
    """Tries the primary chain and executes the fallback on failure."""
    try:
        result = await self.primary.a_run(query, **kwargs)
    except Exception as e:
        print(f"Primary chain failed with error: {e}. Running fallback.")
        return await self.fallback.a_run(query, **kwargs)
    return result
IS

Conditional check for branching logic.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
38
39
40
41
42
43
class IS:
    """Conditional check for branching logic."""

    def __init__(self, key: str, expected_value: Any):
        # Dictionary key to inspect on the incoming data.
        self.key = key
        # Value the key must equal for the condition to hold.
        self.expected_value = expected_value
ParallelChain

Bases: ChainBase

Handles parallel execution of multiple agents or chains.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
class ParallelChain(ChainBase):
    """Handles parallel execution of multiple agents or chains."""

    def __init__(self, agents: list[Union['FlowAgent', ChainBase]]):
        self.agents = agents

    async def a_run(self, query: Any, **kwargs):
        """Runs all agents/chains in parallel."""
        pending = [member.a_run(query, **kwargs) for member in self.agents]
        outcomes = await asyncio.gather(*pending)
        return self._combine_results(outcomes)

    def _combine_results(self, results: list[Any]) -> Any:
        """Intelligently combines parallel results."""
        if not all(isinstance(item, str) for item in results):
            return results
        # All-string results collapse into one pipe-separated string.
        return " | ".join(results)
a_run(query, **kwargs) async

Runs all agents/chains in parallel.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
119
120
121
122
123
async def a_run(self, query: Any, **kwargs):
    """Runs all agents/chains in parallel."""
    pending = [member.a_run(query, **kwargs) for member in self.agents]
    outcomes = await asyncio.gather(*pending)
    return self._combine_results(outcomes)
chain_to_graph(self)

Convert chain to hierarchical structure with complete component detection.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
def chain_to_graph(self) -> dict[str, Any]:
    """Convert chain to hierarchical structure with complete component detection.

    Recursively classifies each node by duck-typing (attribute probing) into
    Agent / Format / Condition / Parallel / Conditional / ErrorHandling /
    Chain, returning a nested dict under the top-level key ``"structure"``.
    Branch order matters: earlier checks shadow later ones for objects that
    carry several of the probed attributes.
    """

    def process_component(comp, depth=0, visited=None):
        # `visited` tracks id()s of components on the *current* recursion
        # path only (children get visited.copy()), guarding against cycles.
        if visited is None:
            visited = set()

        # Prevent infinite recursion
        comp_id = id(comp)
        if comp_id in visited or depth > 20:
            return {"type": "Circular", "display": "[CIRCULAR_REF]", "depth": depth}
        visited.add(comp_id)

        if comp is None:
            return {"type": "Error", "display": "[NULL]", "depth": depth}

        try:
            # Agent detection
            if hasattr(comp, 'amd') and comp.amd:
                return {
                    "type": "Agent",
                    "display": f"[Agent] {comp.amd.name}",
                    "name": comp.amd.name,
                    "depth": depth
                }

            # Format detection (CF) with parallel detection
            if hasattr(comp, 'format_class'):
                name = comp.format_class.__name__
                display = f"[Format] {name}"

                result = {
                    "type": "Format",
                    "display": display,
                    "format_class": name,
                    "extract_key": getattr(comp, 'extract_key', None),
                    "depth": depth,
                    "creates_parallel": False
                }

                # Extract key visualization
                if hasattr(comp, 'extract_key') and comp.extract_key:
                    key = comp.extract_key
                    if key == '*':
                        display += " \033[90m(*all*)\033[0m"
                    elif isinstance(key, str):
                        display += f" \033[90m(→{key})\033[0m"
                    elif isinstance(key, tuple):
                        display += f" \033[90m(→{','.join(key)})\033[0m"

                # Parallel detection
                if hasattr(comp, 'parallel_count') and comp.parallel_count == 'n':
                    display += " \033[95m[PARALLEL]\033[0m"
                    result["creates_parallel"] = True
                    result["parallel_type"] = "auto_n"

                result["display"] = display
                return result

            # Condition detection (IS)
            if hasattr(comp, 'key') and hasattr(comp, 'expected_value'):
                return {
                    "type": "Condition",
                    "display": f"[Condition] IS {comp.key}=='{comp.expected_value}'",
                    "condition_key": comp.key,
                    "expected_value": comp.expected_value,
                    "depth": depth
                }

            # Parallel chain detection
            if hasattr(comp, 'agents') and isinstance(comp.agents, list | tuple):
                branches = []
                for i, agent in enumerate(comp.agents):
                    if agent:
                        # Each branch explores with its own copy of the path-set.
                        branch_data = process_component(agent, depth + 1, visited.copy())
                        branch_data["branch_id"] = i
                        branches.append(branch_data)

                return {
                    "type": "Parallel",
                    "display": f"[Parallel] {len(branches)} branches",
                    "branches": branches,
                    "branch_count": len(branches),
                    "execution_type": "concurrent",
                    "depth": depth
                }

            # Conditional chain detection
            if hasattr(comp, 'condition') and hasattr(comp, 'true_branch'):
                condition_data = process_component(comp.condition, depth + 1,
                                                   visited.copy()) if comp.condition else None
                true_data = process_component(comp.true_branch, depth + 1, visited.copy()) if comp.true_branch else None
                false_data = None

                if hasattr(comp, 'false_branch') and comp.false_branch:
                    false_data = process_component(comp.false_branch, depth + 1, visited.copy())

                return {
                    "type": "Conditional",
                    "display": "[Conditional] Branch Logic",
                    "condition": condition_data,
                    "true_branch": true_data,
                    "false_branch": false_data,
                    "has_false_branch": false_data is not None,
                    "depth": depth
                }

            # Error handling detection
            if hasattr(comp, 'primary') and hasattr(comp, 'fallback'):
                primary_data = process_component(comp.primary, depth + 1, visited.copy()) if comp.primary else None
                fallback_data = process_component(comp.fallback, depth + 1, visited.copy()) if comp.fallback else None

                return {
                    "type": "ErrorHandling",
                    "display": "[Try-Catch] Error Handler",
                    "primary": primary_data,
                    "fallback": fallback_data,
                    "has_fallback": fallback_data is not None,
                    "depth": depth
                }

            # Regular chain detection
            if hasattr(comp, 'tasks') and isinstance(comp.tasks, list | tuple):
                tasks = []
                for i, task in enumerate(comp.tasks):
                    if task is not None:
                        task_data = process_component(task, depth + 1, visited.copy())
                        task_data["task_id"] = i
                        tasks.append(task_data)

                # Analyze chain characteristics
                has_conditionals = any(t.get("type") == "Conditional" for t in tasks)
                has_parallels = any(t.get("type") == "Parallel" for t in tasks)
                has_error_handling = any(t.get("type") == "ErrorHandling" for t in tasks)
                has_auto_parallel = any(t.get("creates_parallel", False) for t in tasks)

                # Pick the most specific label; order encodes precedence.
                chain_type = "Sequential"
                if has_auto_parallel:
                    chain_type = "Auto-Parallel"
                elif has_conditionals and has_parallels:
                    chain_type = "Complex"
                elif has_conditionals:
                    chain_type = "Conditional"
                elif has_parallels:
                    chain_type = "Mixed-Parallel"
                elif has_error_handling:
                    chain_type = "Error-Handling"

                return {
                    "type": "Chain",
                    "display": f"[Chain] {chain_type}",
                    "tasks": tasks,
                    "task_count": len(tasks),
                    "chain_type": chain_type,
                    "has_conditionals": has_conditionals,
                    "has_parallels": has_parallels,
                    "has_error_handling": has_error_handling,
                    "has_auto_parallel": has_auto_parallel,
                    "depth": depth
                }

            # Fallback for unknown types
            return {
                "type": "Unknown",
                "display": f"[Unknown] {type(comp).__name__}",
                "class_name": type(comp).__name__,
                "depth": depth
            }

        except Exception as e:
            return {
                "type": "Error",
                "display": f"[ERROR] {str(e)[:50]}",
                "error": str(e),
                "depth": depth
            }
        finally:
            # Pop this node from the path-set on the way out so siblings
            # sharing the same `visited` instance are not affected.
            visited.discard(comp_id)

    return {"structure": process_component(self)}
print_graph(self)

Enhanced chain visualization with complete functionality coverage and parallel detection.

Source code in toolboxv2/mods/isaa/base/Agent/chain.py
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
def print_graph(self):
    """Enhanced chain visualization with complete functionality coverage and parallel detection.

    Renders the structure produced by ``self.chain_to_graph()`` as an
    ANSI-colored tree on stdout. Pure display helper: returns ``None`` and
    reports (rather than raises) any internal error.
    """

    # Enhanced color scheme with parallel indicators
    COLORS = {
        "Agent": "\033[94m",  # Blue
        "Format": "\033[92m",  # Green
        "Condition": "\033[93m",  # Yellow
        "Parallel": "\033[95m",  # Magenta
        "Conditional": "\033[96m",  # Cyan
        "ErrorHandling": "\033[91m",  # Red
        "Chain": "\033[97m",  # White
        "Unknown": "\033[31m",  # Dark Red
        "Error": "\033[91m",  # Red
        "AutoParallel": "\033[105m",  # Bright Magenta Background
    }
    RESET = "\033[0m"
    BOLD = "\033[1m"
    DIM = "\033[2m"
    PARALLEL_ICON = "⚡"
    BRANCH_ICON = "🔀"
    ERROR_ICON = "🚨"

    def style_component(comp, override_color=None):
        """Return the colorized display string for one structure node."""
        if not comp:
            return f"{COLORS['Error']}[NULL]{RESET}"

        comp_type = comp.get("type", "Unknown")
        display = comp.get("display", f"[{comp_type}]")

        # Special handling for parallel-creating formats
        if comp_type == "Format" and comp.get("creates_parallel", False):
            color = override_color or COLORS["AutoParallel"]
            return f"{color}{PARALLEL_ICON} {display}{RESET}"
        else:
            color = override_color or COLORS.get(comp_type, COLORS['Unknown'])
            return f"{color}{display}{RESET}"

    def print_section_header(title, details=None):
        """Print formatted section header."""
        print(f"\n{BOLD}{'=' * 60}{RESET}")
        print(f"{BOLD}🔗 {title}{RESET}")
        if details:
            print(f"{DIM}{details}{RESET}")
        print(f"{BOLD}{'=' * 60}{RESET}")

    def render_task_flow(tasks, indent="", show_parallel_creation=True):
        """Render tasks with parallel creation detection."""
        if not tasks:
            print(f"{indent}{DIM}(No tasks){RESET}")
            return

        # Set when the upcoming task was already rendered inline as the
        # target of an auto-parallel format, so it must not be printed again.
        skip_next = False

        for i, task in enumerate(tasks):
            if skip_next:
                skip_next = False
                continue
            if not task:
                continue

            is_last = i == len(tasks) - 1
            connector = "└─ " if is_last else "├─ "
            next_indent = indent + ("    " if is_last else "│   ")

            task_type = task.get("type", "Unknown")

            # Handle different task types
            if task_type == "Format" and task.get("creates_parallel", False):
                print(f"{indent}{connector}{style_component(task)}")

                # Show what happens next
                if i + 1 < len(tasks):
                    next_task = tasks[i + 1]
                    print(f"{next_indent}├─ {DIM}Creates parallel execution for:{RESET}")
                    print(f"{next_indent}└─ {PARALLEL_ICON} {style_component(next_task)}")
                    # BUGFIX: actually skip the next task on the following
                    # iteration. A plain `continue` here used to leave it to
                    # be rendered a second time by the main loop.
                    skip_next = True
                    continue

            elif task_type == "Parallel":
                print(f"{indent}{connector}{style_component(task)}")
                branches = task.get("branches", [])

                for j, branch in enumerate(branches):
                    if branch:
                        branch_last = j == len(branches) - 1
                        branch_conn = "└─ " if branch_last else "├─ "
                        branch_indent = next_indent + ("    " if branch_last else "│   ")

                        print(f"{next_indent}{branch_conn}{BRANCH_ICON} Branch {j + 1}:")

                        # Nested chains expand in place; leaves print directly.
                        if branch.get("type") == "Chain":
                            render_task_flow(branch.get("tasks", []), branch_indent, False)
                        else:
                            print(f"{branch_indent}└─ {style_component(branch)}")

            elif task_type == "Conditional":
                print(f"{indent}{connector}{style_component(task)}")

                # Condition
                condition = task.get("condition")
                if condition:
                    print(f"{next_indent}├─ {style_component(condition)}")

                # True branch
                true_branch = task.get("true_branch")
                false_branch = task.get("false_branch")
                has_false = false_branch is not None

                if true_branch:
                    # Connector shape depends on whether a FALSE branch follows.
                    true_conn = "├─ " if has_false else "└─ "
                    print(f"{next_indent}{true_conn}{COLORS['Conditional']}✓ TRUE:{RESET}")
                    true_indent = next_indent + ("│   " if has_false else "    ")

                    if true_branch.get("type") == "Chain":
                        render_task_flow(true_branch.get("tasks", []), true_indent, False)
                    else:
                        print(f"{true_indent}└─ {style_component(true_branch)}")

                if false_branch:
                    print(f"{next_indent}└─ {COLORS['Conditional']}✗ FALSE:{RESET}")
                    false_indent = next_indent + "    "

                    if false_branch.get("type") == "Chain":
                        render_task_flow(false_branch.get("tasks", []), false_indent, False)
                    else:
                        print(f"{false_indent}└─ {style_component(false_branch)}")

            elif task_type == "ErrorHandling":
                print(f"{indent}{connector}{style_component(task)}")

                primary = task.get("primary")
                fallback = task.get("fallback")
                has_fallback = fallback is not None

                if primary:
                    prim_conn = "├─ " if has_fallback else "└─ "
                    print(f"{next_indent}{prim_conn}{COLORS['Chain']}🎯 PRIMARY:{RESET}")
                    prim_indent = next_indent + ("│   " if has_fallback else "    ")

                    if primary.get("type") == "Chain":
                        render_task_flow(primary.get("tasks", []), prim_indent, False)
                    else:
                        print(f"{prim_indent}└─ {style_component(primary)}")

                if fallback:
                    print(f"{next_indent}└─ {ERROR_ICON} FALLBACK:")
                    fallback_indent = next_indent + "    "

                    if fallback.get("type") == "Chain":
                        render_task_flow(fallback.get("tasks", []), fallback_indent, False)
                    else:
                        print(f"{fallback_indent}└─ {style_component(fallback)}")

            else:
                print(f"{indent}{connector}{style_component(task)}")

    # Main execution
    try:
        # Generate graph structure
        graph_data = self.chain_to_graph()
        structure = graph_data.get("structure")

        if not structure:
            print_section_header("Empty Chain")
            return

        # Determine chain characteristics (only "Chain" nodes carry these keys)
        chain_type = structure.get("chain_type", "Unknown")
        has_auto_parallel = structure.get("has_auto_parallel", False)
        has_parallels = structure.get("has_parallels", False)
        has_conditionals = structure.get("has_conditionals", False)
        has_error_handling = structure.get("has_error_handling", False)
        task_count = structure.get("task_count", 0)

        # Build header info
        info_parts = [f"Tasks: {task_count}"]
        if has_auto_parallel:
            info_parts.append(f"{PARALLEL_ICON} Auto-Parallel")
        if has_parallels:
            info_parts.append(f"{BRANCH_ICON} Parallel Branches")
        if has_conditionals:
            info_parts.append("🔀 Conditionals")
        if has_error_handling:
            info_parts.append(f"{ERROR_ICON} Error Handling")

        print_section_header(f"Chain Visualization - {chain_type}", " | ".join(info_parts))

        # Handle different structure types
        struct_type = structure.get("type", "Unknown")

        if struct_type == "Chain":
            tasks = structure.get("tasks", [])
            render_task_flow(tasks)

        elif struct_type == "Parallel":
            print(f"{style_component(structure)}")
            branches = structure.get("branches", [])
            for i, branch in enumerate(branches):
                is_last = i == len(branches) - 1
                conn = "└─ " if is_last else "├─ "
                indent = "    " if is_last else "│   "

                print(f"{conn}{BRANCH_ICON} Branch {i + 1}:")
                if branch.get("type") == "Chain":
                    render_task_flow(branch.get("tasks", []), indent, False)
                else:
                    print(f"{indent}└─ {style_component(branch)}")

        elif struct_type == "Conditional" or struct_type == "ErrorHandling":
            render_task_flow([structure])

        else:
            print(f"└─ {style_component(structure)}")

        print(f"\n{DIM}{'─' * 60}{RESET}")

    except Exception as e:
        # Never propagate: visualization failures must not break the caller.
        print(f"\n{COLORS['Error']}{BOLD}[VISUALIZATION ERROR]{RESET}")
        print(f"{COLORS['Error']}Error: {str(e)}{RESET}")

        # Emergency fallback
        print(f"\n{DIM}--- Emergency Info ---{RESET}")
        try:
            attrs = []
            for attr in ['tasks', 'agents', 'condition', 'true_branch', 'false_branch', 'primary', 'fallback']:
                if hasattr(self, attr):
                    val = getattr(self, attr)
                    if val is not None:
                        if isinstance(val, list | tuple):
                            attrs.append(f"{attr}: {len(val)} items")
                        else:
                            attrs.append(f"{attr}: {type(val).__name__}")

            if attrs:
                print("Chain attributes:")
                for attr in attrs:
                    print(f"  • {attr}")
        except Exception:
            # BUGFIX: was a bare `except:` which also swallowed SystemExit /
            # KeyboardInterrupt; narrowed to Exception.
            print("Complete inspection failed")

        print(f"{DIM}--- End Emergency Info ---{RESET}\n")
config
A2AConfig

Bases: BaseModel

Configuration for A2A integration.

Source code in toolboxv2/mods/isaa/base/Agent/config.py
116
117
118
119
120
121
122
class A2AConfig(BaseModel):
    """Configuration for A2A integration."""
    # Server-side settings; None means "do not run an A2A server".
    server: dict[str, Any] | None = Field(default=None, description="Configuration to run an A2A server (host, port, etc.).")
    # Client-side registry of remote A2A agents, keyed by friendly name.
    known_agents: dict[str, str] = Field(default_factory=dict, description="Named A2A agent URLs to interact with (e.g., {'weather_agent': 'http://weather:5000'}).")
    default_task_timeout: int = Field(default=120, description="Default timeout in seconds for waiting on A2A task results.")

    # Permit non-pydantic types as field values.
    model_config = ConfigDict(arbitrary_types_allowed=True)
ADKConfig

Bases: BaseModel

Configuration for ADK integration.

Source code in toolboxv2/mods/isaa/base/Agent/config.py
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
class ADKConfig(BaseModel):
    """Configuration for ADK integration."""
    enabled: bool = Field(default=True, description="Enable ADK features if ADK is installed.")
    description: str | None = Field(default=None, description="ADK LlmAgent description.")
    instruction_override: str | None = Field(default=None, description="Override agent's system message for ADK.")
    # Tools added via builder or auto-discovery
    # Executor/planner may be given as a registry key (str) or a live ADK instance.
    code_executor: str | BaseCodeExecutor | None = Field(default=None, description="Reference name or instance of ADK code executor.")
    planner: str | BasePlanner | None = Field(default=None, description="Reference name or instance of ADK planner.")
    examples: list[Example] | None = Field(default=None, description="Few-shot examples for ADK.")
    output_schema: type[BaseModel] | None = Field(default=None, description="Pydantic model for structured output.")
    # MCP Toolset config handled separately if ADK is enabled
    use_mcp_toolset: bool = Field(default=True, description="Use ADK's MCPToolset for MCP client connections if ADK is enabled.")
    # Runner config handled separately

    # arbitrary_types_allowed: ADK executor/planner instances are not pydantic models.
    model_config = ConfigDict(arbitrary_types_allowed=True)
AgentConfig

Bases: BaseModel

Main configuration schema for an EnhancedAgent.

Source code in toolboxv2/mods/isaa/base/Agent/config.py
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
class AgentConfig(BaseModel):
    """Main configuration schema for an EnhancedAgent."""
    agent_name: str = Field(..., description="Unique name for this agent instance.")
    version: str = Field(default="0.1.0")

    agent_instruction: str = Field(default="You are a helpful AI assistant. Answer user questions to the best of your knowledge. Respond concisely. use tools when needed")
    agent_description: str = Field(default="An configurable, production-ready agent with integrated capabilities.")

    # Model Selection
    models: list[ModelConfig] = Field(..., description="List of available LLM configurations.")
    default_llm_model: str = Field(..., description="Name of the ModelConfig to use for general LLM calls.")
    formatter_llm_model: str | None = Field(default=None, description="Optional: Name of a faster/cheaper ModelConfig for a_format_class calls.")

    # Core Agent Settings
    world_model_initial_data: dict[str, Any] | None = Field(default=None)
    enable_streaming: bool = Field(default=False)
    verbose: bool = Field(default=False)
    log_level: str = Field(default="INFO", description="Logging level (DEBUG, INFO, WARNING, ERROR).")
    max_history_length: int = Field(default=20, description="Max conversation turns for LiteLLM history.")
    trim_strategy: Literal["litellm", "basic"] = Field(default="litellm")
    persist_history: bool = Field(default=True, description="Persist conversation history (requires persistent ChatSession).")
    user_id_default: str | None = Field(default=None, description="Default user ID for interactions.")

    # Secure Code Execution
    code_executor_type: Literal["restricted", "docker", "none"] = Field(default="restricted", description="Type of code executor to use.")
    code_executor_config: dict[str, Any] = Field(default_factory=dict, description="Configuration specific to the chosen code executor.")
    enable_adk_code_execution_tool: bool = Field(default=True, description="Expose code execution as an ADK tool if ADK is enabled.")

    # Framework Integrations
    # NOTE: a populated sub-config is created only when the optional library
    # was importable at module load time; otherwise the field defaults to None.
    adk: ADKConfig | None = Field(default_factory=ADKConfig if ADK_AVAILABLE_CONF else lambda: None)
    mcp: MCPConfig | None = Field(default_factory=MCPConfig if MCP_AVAILABLE_CONF else lambda: None)
    a2a: A2AConfig | None = Field(default_factory=A2AConfig if A2A_AVAILABLE_CONF else lambda: None)

    # Observability & Cost
    observability: ObservabilityConfig | None = Field(default_factory=ObservabilityConfig)
    budget_manager: BudgetManager | None = Field(default=None, description="Global LiteLLM budget manager instance.") # Needs to be passed in

    # Human-in-the-Loop
    enable_hitl: bool = Field(default=False, description="Enable basic Human-in-the-Loop hooks.")

    # Add other global settings as needed

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Cross-field check: referenced model aliases must exist in `models`.
    @model_validator(mode='after')
    def validate_model_references(self) -> 'AgentConfig':
        model_names = {m.name for m in self.models}
        if self.default_llm_model not in model_names:
            raise ValueError(f"default_llm_model '{self.default_llm_model}' not found in defined models.")
        if self.formatter_llm_model and self.formatter_llm_model not in model_names:
            raise ValueError(f"formatter_llm_model '{self.formatter_llm_model}' not found in defined models.")
        return self

    # Downgrades (rather than rejects) configs for libraries that are not installed.
    @model_validator(mode='after')
    def validate_framework_availability(self) -> 'AgentConfig':
        if self.adk and self.adk.enabled and not ADK_AVAILABLE_CONF:
            logger.warning("ADK configuration provided but ADK library not installed. Disabling ADK features.")
            self.adk.enabled = False
        if self.mcp and (self.mcp.server or self.mcp.client_connections) and not MCP_AVAILABLE_CONF:
             logger.warning("MCP configuration provided but MCP library not installed. Disabling MCP features.")
             self.mcp = None # Or disable specific parts
        if self.a2a and (self.a2a.server or self.a2a.known_agents) and not A2A_AVAILABLE_CONF:
             logger.warning("A2A configuration provided but A2A library not installed. Disabling A2A features.")
             self.a2a = None # Or disable specific parts
        return self

    @classmethod
    def load_from_yaml(cls, path: str | Path) -> 'AgentConfig':
        """Loads configuration from a YAML file."""
        file_path = Path(path)
        if not file_path.is_file():
            raise FileNotFoundError(f"Configuration file not found: {path}")
        # safe_load: never instantiates arbitrary Python objects from the file.
        with open(file_path) as f:
            config_data = yaml.safe_load(f)
        logger.info(f"Loaded agent configuration from {path}")
        # Pydantic validation runs here; raises ValidationError on bad data.
        return cls(**config_data)

    def save_to_yaml(self, path: str | Path):
        """Saves the current configuration to a YAML file."""
        file_path = Path(path)
        file_path.parent.mkdir(parents=True, exist_ok=True)
        with open(file_path, 'w') as f:
            # Use Pydantic's model_dump for clean serialization
            # NOTE(review): mode='python' can emit non-YAML-native objects
            # (e.g. output_schema holds a class); confirm round-trip with
            # load_from_yaml for fully-populated configs.
            yaml.dump(self.model_dump(mode='python'), f, sort_keys=False)
        logger.info(f"Saved agent configuration to {path}")
load_from_yaml(path) classmethod

Loads configuration from a YAML file.

Source code in toolboxv2/mods/isaa/base/Agent/config.py
201
202
203
204
205
206
207
208
209
210
@classmethod
def load_from_yaml(cls, path: str | Path) -> 'AgentConfig':
    """Loads configuration from a YAML file."""
    file_path = Path(path)
    if not file_path.is_file():
        raise FileNotFoundError(f"Configuration file not found: {path}")
    # safe_load: never instantiates arbitrary Python objects from the file.
    with open(file_path) as f:
        config_data = yaml.safe_load(f)
    logger.info(f"Loaded agent configuration from {path}")
    # Pydantic validation runs here; raises ValidationError on bad data.
    return cls(**config_data)
save_to_yaml(path)

Saves the current configuration to a YAML file.

Source code in toolboxv2/mods/isaa/base/Agent/config.py
212
213
214
215
216
217
218
219
def save_to_yaml(self, path: str | Path):
    """Saves the current configuration to a YAML file."""
    file_path = Path(path)
    # Create parent directories on demand so callers can pass fresh paths.
    file_path.parent.mkdir(parents=True, exist_ok=True)
    with open(file_path, 'w') as f:
        # Use Pydantic's model_dump for clean serialization
        yaml.dump(self.model_dump(mode='python'), f, sort_keys=False)
    logger.info(f"Saved agent configuration to {path}")
MCPConfig

Bases: BaseModel

Configuration for MCP integration.

Source code in toolboxv2/mods/isaa/base/Agent/config.py
107
108
109
110
111
112
113
class MCPConfig(BaseModel):
    """Configuration for MCP integration."""
    # Server-side settings; None means "do not run an MCP server".
    server: dict[str, Any] | None = Field(default=None, description="Configuration to run an MCP server (host, port, etc.).")
    client_connections: dict[str, str] = Field(default_factory=dict, description="Named MCP server URLs to connect to as a client (e.g., {'files': 'stdio:npx @mcp/server-filesystem /data'}).")
    # ADK's MCPToolset handles client connections if ADKConfig.use_mcp_toolset is True

    # Permit non-pydantic types as field values.
    model_config = ConfigDict(arbitrary_types_allowed=True)
ModelConfig

Bases: BaseModel

Configuration specific to an LLM model via LiteLLM.

Source code in toolboxv2/mods/isaa/base/Agent/config.py
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
class ModelConfig(BaseModel):
    """Configuration specific to an LLM model via LiteLLM."""
    # Used as key for model selection
    name: str = Field(..., description="Unique identifier/alias for this model configuration (e.g., 'fast_formatter', 'main_reasoner').")
    model: str = Field(..., description="LiteLLM model string (e.g., 'gemini/gemini-1.5-pro-latest', 'ollama/mistral').")
    provider: str | None = Field(default=None, description="LiteLLM provider override if needed.")
    api_key: str | None = Field(default=None, description="API Key (consider using environment variables).")
    api_base: str | None = Field(default=None, description="API Base URL (for local models, proxies).")
    api_version: str | None = Field(default=None, description="API Version (e.g., for Azure).")

    # Common LLM Parameters
    temperature: float | None = Field(default=0.7)
    top_p: float | None = Field(default=None)
    top_k: int | None = Field(default=None)
    max_tokens: int | None = Field(default=2048, description="Max tokens for generation.")
    max_input_tokens: int | None = Field(default=None, description="Max input context window (autodetected if None).")
    stop_sequence: list[str] | None = Field(default=None)
    presence_penalty: float | None = Field(default=None)
    frequency_penalty: float | None = Field(default=None)
    system_message: str | None = Field(default=None, description="Default system message for this model.")

    # LiteLLM Specific
    caching: bool = Field(default=True, description="Enable LiteLLM caching for this model.")
    # budget_manager: Optional[BudgetManager] = Field(default=None) # Budget manager applied globally or per-agent

    # extra='allow' lets callers pass through any additional LiteLLM kwargs.
    model_config = ConfigDict(arbitrary_types_allowed=True, extra='allow') # Allow extra LiteLLM params
ObservabilityConfig

Bases: BaseModel

Configuration for observability (OpenTelemetry).

Source code in toolboxv2/mods/isaa/base/Agent/config.py
125
126
127
128
129
130
131
132
class ObservabilityConfig(BaseModel):
    """Configuration for observability (OpenTelemetry)."""
    enabled: bool = Field(default=True)
    endpoint: str | None = Field(default=None, description="OTLP endpoint URL (e.g., http://jaeger:4317).")
    service_name: str | None = Field(default=None, description="Service name for traces/metrics (defaults to agent name).")
    # Add more OTel config options as needed (headers, certs, resource attributes)

    # Permit non-pydantic types as field values.
    model_config = ConfigDict(arbitrary_types_allowed=True)
executors
DockerCodeExecutor

Bases: _BaseExecutorClass

Executes Python code in a sandboxed Docker container.

Requires Docker to be installed and running, and the 'docker' Python SDK.

Source code in toolboxv2/mods/isaa/base/Agent/executors.py
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
class DockerCodeExecutor(_BaseExecutorClass):
    """
    Executes Python code in a sandboxed Docker container.

    Requires Docker to be installed and running, and the 'docker' Python SDK.
    """
    DEFAULT_DOCKER_IMAGE = "python:3.10-slim" # Use a minimal image
    DEFAULT_TIMEOUT = 10 # Seconds
    DEFAULT_MEM_LIMIT = "128m"
    DEFAULT_CPUS = 0.5

    def __init__(self,
                 docker_image: str = DEFAULT_DOCKER_IMAGE,
                 timeout: int = DEFAULT_TIMEOUT,
                 mem_limit: str = DEFAULT_MEM_LIMIT,
                 cpus: float = DEFAULT_CPUS,
                 network_mode: str = "none", # Disable networking by default for security
                 docker_client_config: dict | None = None):
        """Connect to the Docker daemon and ensure the image is available.

        Raises:
            ImportError: if the 'docker' Python SDK is not installed.
            RuntimeError: if the daemon is unreachable, or the image is
                neither available locally nor pullable.
        """
        if not DOCKER_AVAILABLE:
            raise ImportError("Docker SDK not installed ('pip install docker'). Cannot use DockerCodeExecutor.")

        self.docker_image = docker_image
        self.timeout = timeout
        self.mem_limit = mem_limit
        self.cpus = cpus
        self.network_mode = network_mode
        try:
            self.client = docker.from_env(**(docker_client_config or {}))
            self.client.ping() # Check connection
            # Ensure image exists locally or pull it
            try:
                self.client.images.get(self.docker_image)
                logger.info(f"Docker image '{self.docker_image}' found locally.")
            except ImageNotFound:
                logger.warning(f"Docker image '{self.docker_image}' not found locally. Attempting to pull...")
                try:
                    self.client.images.pull(self.docker_image)
                    logger.info(f"Successfully pulled Docker image '{self.docker_image}'.")
                except APIError as pull_err:
                    # NOTE(review): this RuntimeError is raised inside the outer
                    # try and therefore re-caught by the `except Exception` below,
                    # which re-reports a pull failure as a daemon-connection
                    # failure. Confirm whether that wrapping is intended.
                    raise RuntimeError(f"Failed to pull Docker image '{self.docker_image}': {pull_err}") from pull_err
        except Exception as e:
            raise RuntimeError(f"Failed to connect to Docker daemon: {e}. Is Docker running?") from e
        logger.info(f"DockerCodeExecutor initialized (Image: {docker_image}, Timeout: {timeout}s, Network: {network_mode})")

    def _execute(self, code: str) -> dict[str, Any]:
        """Internal execution logic.

        Runs `code` via `python -c` in a detached, read-only container with
        memory/CPU limits and (by default) no network, waits up to
        self.timeout seconds, then collects stdout/stderr and removes the
        container.

        Returns:
            dict with keys "stdout", "stderr", "error" (None on success)
            and "exit_code" (container status code; -1 on API/timeout errors).
        """
        result = {"stdout": "", "stderr": "", "error": None, "exit_code": None}
        container = None

        try:
            logger.debug(f"Creating Docker container from image '{self.docker_image}'...")
            container = self.client.containers.run(
                image=self.docker_image,
                command=["python", "-c", code],
                detach=True,
                mem_limit=self.mem_limit,
                nano_cpus=int(self.cpus * 1e9),
                network_mode=self.network_mode,
                # Security considerations: Consider read-only filesystem, dropping capabilities
                read_only=True,
                # working_dir="/app", # Define a working dir if needed
                # volumes={...} # Mount volumes carefully if required
            )
            logger.debug(f"Container '{container.short_id}' started.")

            # Wait for container completion with timeout
            container_result = container.wait(timeout=self.timeout)
            result["exit_code"] = container_result.get("StatusCode", None)

            # Retrieve logs (two calls: one for each stream)
            result["stdout"] = container.logs(stdout=True, stderr=False).decode('utf-8', errors='replace').strip()
            result["stderr"] = container.logs(stdout=False, stderr=True).decode('utf-8', errors='replace').strip()

            logger.debug(f"Container '{container.short_id}' finished with exit code {result['exit_code']}.")
            if result["exit_code"] != 0:
                 logger.warning(f"Container stderr: {result['stderr'][:500]}...") # Log stderr on failure

        except ContainerError as e:
            result["error"] = f"ContainerError: {e}"
            result["stderr"] = e.stderr.decode('utf-8', errors='replace').strip() if e.stderr else str(e)
            result["exit_code"] = e.exit_status
            logger.error(f"Container '{container.short_id if container else 'N/A'}' failed: {result['error']}\nStderr: {result['stderr']}")
        except APIError as e:
            result["error"] = f"Docker APIError: {e}"
            result["exit_code"] = -1
            logger.error(f"Docker API error during execution: {e}")
        except Exception as e:
            # Catch potential timeout errors from container.wait or other unexpected issues
            result["error"] = f"Unexpected execution error: {type(e).__name__}: {e}"
            result["exit_code"] = -1
            # Check if it looks like a timeout
            if isinstance(e, TimeoutError) or "Timeout" in str(e): # docker SDK might raise requests.exceptions.ReadTimeout
                result["stderr"] = f"Execution timed out after {self.timeout} seconds."
                logger.warning(f"Container execution timed out ({self.timeout}s).")
            else:
                logger.error(f"Unexpected error during Docker execution: {e}", exc_info=True)
        finally:
            # Always clean up the container, even on failure paths.
            if container:
                try:
                    logger.debug(f"Removing container '{container.short_id}'...")
                    container.remove(force=True)
                except APIError as rm_err:
                    logger.warning(f"Failed to remove container {container.short_id}: {rm_err}")

        return result

    # --- ADK Compatibility Method ---
    if ADK_EXEC_AVAILABLE:
        def execute_code(self, invocation_context: InvocationContext, code_input: CodeExecutionInput) -> CodeExecutionResult:
            """ADK adapter: run Python code and fold stdout/stderr into one string."""
            logger.debug(f"DockerCodeExecutor executing ADK request (lang: {code_input.language}). Code: {code_input.code[:100]}...")
            if code_input.language.lower() != 'python':
                 return CodeExecutionResult(output=f"Error: Unsupported language '{code_input.language}'. Only Python is supported.", outcome="OUTCOME_FAILURE")

            exec_result = self._execute(code_input.code)

            output_str = ""
            if exec_result["stdout"]:
                output_str += f"Stdout:\n{exec_result['stdout']}\n"
            if exec_result["stderr"]:
                 output_str += f"Stderr:\n{exec_result['stderr']}\n"
            if not output_str and exec_result["exit_code"] == 0:
                 output_str = "Execution successful with no output."
            elif not output_str and exec_result["exit_code"] != 0:
                 output_str = f"Execution failed with no output (Exit code: {exec_result['exit_code']}). Error: {exec_result['error']}"

            outcome = "OUTCOME_SUCCESS" if exec_result["exit_code"] == 0 else "OUTCOME_FAILURE"

            return CodeExecutionResult(output=output_str.strip(), outcome=outcome)
    # --- End ADK Compatibility ---

    # --- Direct Call Method ---
    def execute(self, code: str) -> dict[str, Any]:
        """Directly execute code, returning detailed dictionary."""
        logger.debug(f"DockerCodeExecutor executing direct call. Code: {code[:100]}...")
        return self._execute(code)
execute(code)

Directly execute code, returning detailed dictionary.

Source code in toolboxv2/mods/isaa/base/Agent/executors.py
333
334
335
336
def execute(self, code: str) -> dict[str, Any]:
    """Directly execute code, returning detailed dictionary."""
    # Thin public wrapper around _execute(); only logs a 100-char preview.
    logger.debug(f"DockerCodeExecutor executing direct call. Code: {code[:100]}...")
    return self._execute(code)
RestrictedPythonExecutor

Bases: _BaseExecutorClass

Executes Python code using restrictedpython.

Safer than exec() but NOT a full sandbox. Known vulnerabilities exist. Use with extreme caution and only with trusted code sources or for low-risk operations. Docker is strongly recommended for untrusted code.

Source code in toolboxv2/mods/isaa/base/Agent/executors.py
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
class RestrictedPythonExecutor(_BaseExecutorClass):
    """
    Executes Python code using restrictedpython.

    Safer than exec() but NOT a full sandbox. Known vulnerabilities exist.
    Use with extreme caution and only with trusted code sources or for
    low-risk operations. Docker is strongly recommended for untrusted code.
    """
    # Baseline restricted globals shared by all instances.
    # NOTE(review): the guard helpers referenced here (safe_getattr,
    # safe_getitem, guarded_setattr) must match what the installed
    # restrictedpython version actually exports (upstream names them e.g.
    # Guards.safer_getattr) -- confirm against the pinned library version.
    DEFAULT_ALLOWED_GLOBALS = {
        **safe_globals,
        '_print_': restrictedpython.PrintCollector,
        '_getattr_': restrictedpython.safe_getattr,
        '_getitem_': restrictedpython.safe_getitem,
        '_write_': restrictedpython.guarded_setattr, # Allows modifying specific safe objects if needed
        # Add other safe builtins or modules carefully
        'math': __import__('math'),
        'random': __import__('random'),
        'datetime': __import__('datetime'),
        'time': __import__('time'),
        # 'requests': None, # Example: Explicitly disallow
    }

    def __init__(self, allowed_globals: dict | None = None, max_execution_time: int = 5):
        """Create the executor.

        Args:
            allowed_globals: Optional replacement for DEFAULT_ALLOWED_GLOBALS.
            max_execution_time: Best-effort time limit in seconds; NOT
                preemptively enforced during exec (restrictedpython cannot
                interrupt running code).

        Raises:
            ImportError: if restrictedpython is not installed.
        """
        if not RESTRICTEDPYTHON_AVAILABLE:
            raise ImportError("restrictedpython is not installed. Cannot use RestrictedPythonExecutor.")
        self.allowed_globals = allowed_globals or self.DEFAULT_ALLOWED_GLOBALS
        self.max_execution_time = max_execution_time # Basic timeout (not perfectly enforced by restrictedpython)
        logger.warning("Initialized RestrictedPythonExecutor. This provides LIMITED sandboxing. Use Docker for untrusted code.")

    def _execute(self, code: str) -> dict[str, Any]:
        """Compile `code` with compile_restricted and exec it, capturing prints.

        Returns:
            dict with keys "stdout", "stderr", "error" (None on success) and
            "exit_code" (0 success, 1 code error, -1 timeout).
        """
        start_time = time.monotonic()
        result = {"stdout": "", "stderr": "", "error": None, "exit_code": None}
        local_vars: dict[str, Any] = {}

        try:
            # Basic timeout check (not preemptive; only triggers for a
            # non-positive max_execution_time).
            if time.monotonic() - start_time > self.max_execution_time:
                 raise TimeoutError(f"Execution exceeded max time of {self.max_execution_time}s (pre-check).")

            # Compile the code in restricted mode
            byte_code = compile_restricted(code, filename='<inline code>', mode='exec')

            # Build per-run globals from a COPY of the shared dict so that no
            # state leaks between executions (the previous implementation
            # mutated self.allowed_globals in place). '_print_' is the
            # PrintCollector *class*: restrictedpython-compiled code
            # instantiates it itself and binds the instance to the local
            # name '_print'.
            exec_globals = {**self.allowed_globals, '_print_': restrictedpython.PrintCollector}

            # Execute the compiled code
            # Note: restrictedpython does not inherently support robust timeouts during exec
            exec(byte_code, exec_globals, local_vars)

            # Check execution time again
            duration = time.monotonic() - start_time
            if duration > self.max_execution_time:
                logger.warning(f"Execution finished but exceeded max time ({duration:.2f}s > {self.max_execution_time}s).")
                # Potentially treat as an error or partial success

            # Fix: collected output lives on the collector the restricted code
            # created (local '_print'); calling it returns the joined text.
            printer = local_vars.get('_print')
            result["stdout"] = printer() if printer is not None else ""
            result["exit_code"] = 0 # Assume success if no exception

        except TimeoutError as e:
            result["stderr"] = f"TimeoutError: {e}"
            result["error"] = str(e)
            result["exit_code"] = -1 # Indicate timeout
        except SyntaxError as e:
            result["stderr"] = f"SyntaxError: {e}"
            result["error"] = str(e)
            result["exit_code"] = 1
        except Exception as e:
            # Capture other potential execution errors allowed by restrictedpython
            error_type = type(e).__name__
            error_msg = f"{error_type}: {e}"
            result["stderr"] = error_msg
            result["error"] = str(e)
            result["exit_code"] = 1
            logger.warning(f"RestrictedPython execution caught exception: {error_msg}", exc_info=False) # Avoid logging potentially sensitive details from code

        return result

    # --- ADK Compatibility Method ---
    if ADK_EXEC_AVAILABLE:
        def execute_code(self, invocation_context: InvocationContext, code_input: CodeExecutionInput) -> CodeExecutionResult:
            """ADK adapter: run Python code and fold stdout/stderr into one string."""
            logger.debug(f"RestrictedPythonExecutor executing ADK request (lang: {code_input.language}). Code: {code_input.code[:100]}...")
            if code_input.language.lower() != 'python':
                 return CodeExecutionResult(output=f"Error: Unsupported language '{code_input.language}'. Only Python is supported.", outcome="OUTCOME_FAILURE")

            exec_result = self._execute(code_input.code)

            output_str = ""
            if exec_result["stdout"]:
                output_str += f"Stdout:\n{exec_result['stdout']}\n"
            if exec_result["stderr"]:
                 output_str += f"Stderr:\n{exec_result['stderr']}\n"
            if not output_str and exec_result["exit_code"] == 0:
                 output_str = "Execution successful with no output."
            elif not output_str and exec_result["exit_code"] != 0:
                 output_str = f"Execution failed with no output (Exit code: {exec_result['exit_code']}). Error: {exec_result['error']}"

            outcome = "OUTCOME_SUCCESS" if exec_result["exit_code"] == 0 else "OUTCOME_FAILURE"

            return CodeExecutionResult(output=output_str.strip(), outcome=outcome)
    # --- End ADK Compatibility ---

    # --- Direct Call Method ---
    def execute(self, code: str) -> dict[str, Any]:
        """Directly execute code, returning detailed dictionary."""
        logger.debug(f"RestrictedPythonExecutor executing direct call. Code: {code[:100]}...")
        return self._execute(code)
execute(code)

Directly execute code, returning detailed dictionary.

Source code in toolboxv2/mods/isaa/base/Agent/executors.py
193
194
195
196
def execute(self, code: str) -> dict[str, Any]:
    """Directly execute code, returning detailed dictionary."""
    # Thin public wrapper around _execute(); only logs a 100-char preview.
    logger.debug(f"RestrictedPythonExecutor executing direct call. Code: {code[:100]}...")
    return self._execute(code)
get_code_executor(config)

Creates a code executor instance based on configuration.

Source code in toolboxv2/mods/isaa/base/Agent/executors.py
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
def get_code_executor(config: 'AgentConfig') -> RestrictedPythonExecutor | DockerCodeExecutor | BaseCodeExecutor | None:
    """Creates a code executor instance based on configuration."""
    kind = config.code_executor_type
    kwargs = config.code_executor_config or {}

    # Guard-clause dispatch on the configured executor type.
    if kind == "restricted":
        if RESTRICTEDPYTHON_AVAILABLE:
            return RestrictedPythonExecutor(**kwargs)
        logger.error("RestrictedPython executor configured but library not installed. Code execution disabled.")
        return None

    if kind == "docker":
        if not DOCKER_AVAILABLE:
            logger.error("Docker executor configured but library not installed or Docker not running. Code execution disabled.")
            return None
        try:
            return DockerCodeExecutor(**kwargs)
        except Exception as e:
            logger.error(f"Failed to initialize DockerCodeExecutor: {e}. Code execution disabled.")
            return None

    if kind == "none":
        logger.info("Code execution explicitly disabled in configuration.")
        return None

    # A pre-configured ADK executor instance may be passed straight through.
    if kind and ADK_EXEC_AVAILABLE and isinstance(kind, BaseCodeExecutor):
        logger.info(f"Using pre-configured ADK code executor: {type(kind).__name__}")
        return kind

    logger.warning(f"Unknown or unsupported code_executor_type: '{kind}'. Code execution disabled.")
    return None
types
AgentCheckpoint dataclass

Enhanced AgentCheckpoint with UnifiedContextManager and ChatSession integration

Source code in toolboxv2/mods/isaa/base/Agent/types.py
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
@dataclass
class AgentCheckpoint:
    """Enhanced AgentCheckpoint with UnifiedContextManager and ChatSession integration"""
    timestamp: datetime
    agent_state: dict[str, Any]
    task_state: dict[str, Any]
    world_model: dict[str, Any]
    active_flows: list[str]
    metadata: dict[str, Any] = field(default_factory=dict)

    # NEUE: Enhanced checkpoint data for UnifiedContextManager integration
    session_data: dict[str, Any] = field(default_factory=dict)
    context_manager_state: dict[str, Any] = field(default_factory=dict)
    conversation_history: list[dict[str, Any]] = field(default_factory=list)
    variable_system_state: dict[str, Any] = field(default_factory=dict)
    results_store: dict[str, Any] = field(default_factory=dict)
    tool_capabilities: dict[str, Any] = field(default_factory=dict)
    variable_scopes: dict[str, Any] = field(default_factory=dict)

    # Optional: Additional system state
    performance_metrics: dict[str, Any] = field(default_factory=dict)
    execution_history: list[dict[str, Any]] = field(default_factory=list)

    def get_checkpoint_summary(self) -> str:
        """Get human-readable checkpoint summary"""
        try:
            summary_parts = []

            # Basic info
            if self.session_data:
                session_count = len([s for s in self.session_data.values() if s.get("status") != "failed"])
                summary_parts.append(f"{session_count} sessions")

            # Task info
            if self.task_state:
                completed_tasks = len([t for t in self.task_state.values() if t.get("status") == "completed"])
                total_tasks = len(self.task_state)
                summary_parts.append(f"{completed_tasks}/{total_tasks} tasks")

            # Conversation info
            if self.conversation_history:
                summary_parts.append(f"{len(self.conversation_history)} messages")

            # Context info
            if self.context_manager_state:
                cache_count = self.context_manager_state.get("cache_entries", 0)
                if cache_count > 0:
                    summary_parts.append(f"{cache_count} cached contexts")

            # Variable system info
            if self.variable_system_state:
                scopes = len(self.variable_system_state.get("scopes", {}))
                summary_parts.append(f"{scopes} variable scopes")

            # Tool capabilities
            if self.tool_capabilities:
                summary_parts.append(f"{len(self.tool_capabilities)} analyzed tools")

            return "; ".join(summary_parts) if summary_parts else "Basic checkpoint"

        except Exception as e:
            return f"Summary generation failed: {str(e)}"

    def get_storage_size_estimate(self) -> dict[str, int]:
        """Estimate storage size of different checkpoint components"""
        try:
            sizes = {}

            # Calculate sizes in bytes (approximate)
            sizes["agent_state"] = len(str(self.agent_state))
            sizes["task_state"] = len(str(self.task_state))
            sizes["world_model"] = len(str(self.world_model))
            sizes["conversation_history"] = len(str(self.conversation_history))
            sizes["session_data"] = len(str(self.session_data))
            sizes["context_manager_state"] = len(str(self.context_manager_state))
            sizes["variable_system_state"] = len(str(self.variable_system_state))
            sizes["results_store"] = len(str(self.results_store))
            sizes["tool_capabilities"] = len(str(self.tool_capabilities))

            sizes["total_bytes"] = sum(sizes.values())
            sizes["total_kb"] = sizes["total_bytes"] / 1024
            sizes["total_mb"] = sizes["total_kb"] / 1024

            return sizes

        except Exception as e:
            return {"error": str(e)}

    def validate_checkpoint_integrity(self) -> dict[str, Any]:
        """Validate checkpoint integrity and completeness"""
        validation = {
            "is_valid": True,
            "errors": [],
            "warnings": [],
            "completeness_score": 0.0,
            "components_present": []
        }

        try:
            # Check required components
            required_components = ["timestamp", "agent_state", "task_state", "world_model", "active_flows"]
            for component in required_components:
                if hasattr(self, component) and getattr(self, component) is not None:
                    validation["components_present"].append(component)
                else:
                    validation["errors"].append(f"Missing required component: {component}")
                    validation["is_valid"] = False

            # Check optional enhanced components
            enhanced_components = ["session_data", "context_manager_state", "conversation_history",
                                   "variable_system_state", "results_store", "tool_capabilities"]

            for component in enhanced_components:
                if hasattr(self, component) and getattr(self, component):
                    validation["components_present"].append(component)

            # Calculate completeness score
            total_possible = len(required_components) + len(enhanced_components)
            validation["completeness_score"] = len(validation["components_present"]) / total_possible

            # Check timestamp validity
            if isinstance(self.timestamp, datetime):
                age_hours = (datetime.now() - self.timestamp).total_seconds() / 3600
                if age_hours > 24:
                    validation["warnings"].append(f"Checkpoint is {age_hours:.1f} hours old")
            else:
                validation["errors"].append("Invalid timestamp format")
                validation["is_valid"] = False

            # Check session data consistency
            if self.session_data and self.conversation_history:
                session_ids_in_data = set(self.session_data.keys())
                session_ids_in_conversation = set(
                    msg.get("session_id") for msg in self.conversation_history
                    if msg.get("session_id")
                )

                if session_ids_in_data != session_ids_in_conversation:
                    validation["warnings"].append("Session data and conversation history session IDs don't match")

            return validation

        except Exception as e:
            validation["errors"].append(f"Validation error: {str(e)}")
            validation["is_valid"] = False
            return validation

    def get_version_info(self) -> dict[str, str]:
        """Get checkpoint version information"""
        return {
            "checkpoint_version": self.metadata.get("checkpoint_version", "1.0"),
            "data_format": "enhanced" if self.session_data or self.context_manager_state else "basic",
            "context_system": "unified" if self.context_manager_state else "legacy",
            "variable_system": "integrated" if self.variable_system_state else "basic",
            "session_management": "chatsession" if self.session_data else "memory_only",
            "created_with": "FlowAgent v2.0 Enhanced Context System"
        }
get_checkpoint_summary()

Get human-readable checkpoint summary

Source code in toolboxv2/mods/isaa/base/Agent/types.py
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
def get_checkpoint_summary(self) -> str:
    """Get human-readable checkpoint summary

    One short fragment per populated component, joined with "; ";
    returns "Basic checkpoint" when nothing is populated. Never raises:
    any failure is reported in the returned string instead.
    """
    try:
        summary_parts = []

        # Basic info
        # NOTE(review): assumes session_data values are dicts -- confirm.
        if self.session_data:
            session_count = len([s for s in self.session_data.values() if s.get("status") != "failed"])
            summary_parts.append(f"{session_count} sessions")

        # Task info
        if self.task_state:
            completed_tasks = len([t for t in self.task_state.values() if t.get("status") == "completed"])
            total_tasks = len(self.task_state)
            summary_parts.append(f"{completed_tasks}/{total_tasks} tasks")

        # Conversation info
        if self.conversation_history:
            summary_parts.append(f"{len(self.conversation_history)} messages")

        # Context info
        if self.context_manager_state:
            cache_count = self.context_manager_state.get("cache_entries", 0)
            if cache_count > 0:
                summary_parts.append(f"{cache_count} cached contexts")

        # Variable system info
        if self.variable_system_state:
            scopes = len(self.variable_system_state.get("scopes", {}))
            summary_parts.append(f"{scopes} variable scopes")

        # Tool capabilities
        if self.tool_capabilities:
            summary_parts.append(f"{len(self.tool_capabilities)} analyzed tools")

        return "; ".join(summary_parts) if summary_parts else "Basic checkpoint"

    except Exception as e:
        return f"Summary generation failed: {str(e)}"
get_storage_size_estimate()

Estimate storage size of different checkpoint components

Source code in toolboxv2/mods/isaa/base/Agent/types.py
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
def get_storage_size_estimate(self) -> dict[str, int | float]:
    """Estimate storage size of different checkpoint components

    Sizes are rough approximations (length of the str() repr, in bytes).
    Annotation widened to int | float: total_kb/total_mb are floats.
    """
    try:
        sizes = {}

        # Calculate sizes in bytes (approximate)
        sizes["agent_state"] = len(str(self.agent_state))
        sizes["task_state"] = len(str(self.task_state))
        sizes["world_model"] = len(str(self.world_model))
        sizes["conversation_history"] = len(str(self.conversation_history))
        sizes["session_data"] = len(str(self.session_data))
        sizes["context_manager_state"] = len(str(self.context_manager_state))
        sizes["variable_system_state"] = len(str(self.variable_system_state))
        sizes["results_store"] = len(str(self.results_store))
        sizes["tool_capabilities"] = len(str(self.tool_capabilities))

        # Totals derived after all components are measured.
        sizes["total_bytes"] = sum(sizes.values())
        sizes["total_kb"] = sizes["total_bytes"] / 1024
        sizes["total_mb"] = sizes["total_kb"] / 1024

        return sizes

    except Exception as e:
        return {"error": str(e)}
get_version_info()

Get checkpoint version information

Source code in toolboxv2/mods/isaa/base/Agent/types.py
686
687
688
689
690
691
692
693
694
695
def get_version_info(self) -> dict[str, str]:
    """Get checkpoint version information

    Classifies the checkpoint format by which optional components are
    populated (enhanced vs basic, unified vs legacy context, etc.).
    """
    return {
        "checkpoint_version": self.metadata.get("checkpoint_version", "1.0"),
        "data_format": "enhanced" if self.session_data or self.context_manager_state else "basic",
        "context_system": "unified" if self.context_manager_state else "legacy",
        "variable_system": "integrated" if self.variable_system_state else "basic",
        "session_management": "chatsession" if self.session_data else "memory_only",
        "created_with": "FlowAgent v2.0 Enhanced Context System"
    }
validate_checkpoint_integrity()

Validate checkpoint integrity and completeness

Source code in toolboxv2/mods/isaa/base/Agent/types.py
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
def validate_checkpoint_integrity(self) -> dict[str, Any]:
    """Validate checkpoint integrity and completeness

    Returns a dict with is_valid, errors, warnings, completeness_score
    (fraction of known components present) and components_present.
    Never raises; failures are reported inside the returned dict.
    """
    validation = {
        "is_valid": True,
        "errors": [],
        "warnings": [],
        "completeness_score": 0.0,
        "components_present": []
    }

    try:
        # Check required components
        required_components = ["timestamp", "agent_state", "task_state", "world_model", "active_flows"]
        for component in required_components:
            if hasattr(self, component) and getattr(self, component) is not None:
                validation["components_present"].append(component)
            else:
                validation["errors"].append(f"Missing required component: {component}")
                validation["is_valid"] = False

        # Check optional enhanced components (count only when non-empty)
        enhanced_components = ["session_data", "context_manager_state", "conversation_history",
                               "variable_system_state", "results_store", "tool_capabilities"]

        for component in enhanced_components:
            if hasattr(self, component) and getattr(self, component):
                validation["components_present"].append(component)

        # Calculate completeness score
        total_possible = len(required_components) + len(enhanced_components)
        validation["completeness_score"] = len(validation["components_present"]) / total_possible

        # Check timestamp validity
        # NOTE(review): naive local time; assumes timestamp was recorded
        # naive as well -- confirm if checkpoints cross timezones.
        if isinstance(self.timestamp, datetime):
            age_hours = (datetime.now() - self.timestamp).total_seconds() / 3600
            if age_hours > 24:
                validation["warnings"].append(f"Checkpoint is {age_hours:.1f} hours old")
        else:
            validation["errors"].append("Invalid timestamp format")
            validation["is_valid"] = False

        # Check session data consistency
        if self.session_data and self.conversation_history:
            session_ids_in_data = set(self.session_data.keys())
            session_ids_in_conversation = set(
                msg.get("session_id") for msg in self.conversation_history
                if msg.get("session_id")
            )

            if session_ids_in_data != session_ids_in_conversation:
                validation["warnings"].append("Session data and conversation history session IDs don't match")

        return validation

    except Exception as e:
        validation["errors"].append(f"Validation error: {str(e)}")
        validation["is_valid"] = False
        return validation
AgentModelData

Bases: BaseModel

Source code in toolboxv2/mods/isaa/base/Agent/types.py
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
class AgentModelData(BaseModel):
    """Configuration bundle for a FlowAgent's LLM setup.

    Holds model identifiers, sampling/token limits, API credentials, and an
    optional persona whose instructions can be merged into the system message.
    """
    name: str = "FlowAgent"
    fast_llm_model: str = "openrouter/anthropic/claude-3-haiku"
    complex_llm_model: str = "openrouter/openai/gpt-4o"
    system_message: str = "You are a production-ready autonomous agent."
    temperature: float = 0.7
    max_tokens: int = 2048
    max_input_tokens: int = 32768
    api_key: str | None = None
    api_base: str | None = None
    budget_manager: Any = None
    caching: bool = True
    # BUG FIX: the default was `True`, which is not a valid PersonaConfig and
    # made get_system_message_with_persona() fail with AttributeError
    # (`True.apply_method`). `None` ("no persona configured") is the intended
    # default for an optional field.
    persona: PersonaConfig | None = None
    use_fast_response: bool = True

    def get_system_message_with_persona(self) -> str:
        """Return the system message, extended with persona instructions.

        The persona addition is appended only when a persona is configured and
        its apply_method targets the system prompt ("system_prompt" or "both").
        """
        base_message = self.system_message

        if self.persona and self.persona.apply_method in ["system_prompt", "both"]:
            persona_addition = self.persona.to_system_prompt_addition()
            if persona_addition:
                base_message += f"\n## Persona Instructions\n{persona_addition}"

        return base_message
get_system_message_with_persona()

Get system message with persona integration

Source code in toolboxv2/mods/isaa/base/Agent/types.py
770
771
772
773
774
775
776
777
778
779
def get_system_message_with_persona(self) -> str:
    """Build the system message, appending persona instructions when the
    configured persona targets the system prompt ("system_prompt" or "both")."""
    message = self.system_message
    persona = self.persona

    if not (persona and persona.apply_method in ("system_prompt", "both")):
        return message

    addition = persona.to_system_prompt_addition()
    if addition:
        message += f"\n## Persona Instructions\n{addition}"
    return message
ChainMetadata dataclass

Metadata for stored chains

Source code in toolboxv2/mods/isaa/base/Agent/types.py
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
@dataclass
class ChainMetadata:
    """Metadata for stored chains.

    Captures descriptive and structural information about a chain so it can
    be listed, versioned, and filtered without loading the chain itself.
    """
    name: str  # chain identifier
    description: str = ""
    created_at: datetime = field(default_factory=datetime.now)   # set when the record is created (local clock)
    modified_at: datetime = field(default_factory=datetime.now)  # NOTE(review): not auto-refreshed on change — caller must update
    version: str = "1.0.0"  # version string; presumably semver — TODO confirm
    tags: list[str] = field(default_factory=list)
    author: str = ""
    complexity: str = "simple"  # simple, medium, complex
    agent_count: int = 0  # number of agents referenced by the chain
    has_conditionals: bool = False  # chain contains conditional branching
    has_parallels: bool = False  # chain contains parallel sections
    has_error_handling: bool = False  # chain defines error-handling paths
DecisionTask dataclass

Bases: Task

Task für dynamisches Routing

Source code in toolboxv2/mods/isaa/base/Agent/types.py
493
494
495
496
497
498
@dataclass
class DecisionTask(Task):
    """Task for dynamic routing."""
    decision_prompt: str = ""  # short question posed to the LLM
    routing_map: dict[str, str] = field(default_factory=dict)  # decision result -> next task
    decision_model: str = "fast"  # which LLM to use for the decision
FormatConfig dataclass

Konfiguration für Response-Format und -Länge

Source code in toolboxv2/mods/isaa/base/Agent/types.py
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
@dataclass
class FormatConfig:
    """Configuration for response format and length.

    Combines a response format, a target text length, optional custom
    instructions, and a strictness flag into prompt-ready instruction text.
    """
    response_format: ResponseFormat = ResponseFormat.FREE_TEXT
    text_length: TextLength = TextLength.CHAT_CONVERSATION
    custom_instructions: str = ""
    strict_format_adherence: bool = True
    quality_threshold: float = 0.7

    def get_format_instructions(self) -> str:
        """Return the instruction text for the configured response format."""
        return {
            ResponseFormat.FREE_TEXT: "Use natural continuous text without special formatting.",
            ResponseFormat.WITH_TABLES: "Integrate tables for structured data representation. Use Markdown tables.",
            ResponseFormat.WITH_BULLET_POINTS: "Structure information with bullet points (•, -, *) for better readability.",
            ResponseFormat.WITH_LISTS: "Use numbered and unnumbered lists to organize content.",
            ResponseFormat.TEXT_ONLY: "Plain text only without formatting, symbols, or structural elements.",
            ResponseFormat.MD_TEXT: "Full Markdown formatting with headings, code blocks, links, etc.",
            ResponseFormat.YAML_TEXT: "Structure responses in YAML format for machine-readable output.",
            ResponseFormat.JSON_TEXT: "Format responses as a JSON structure for API integration.",
            ResponseFormat.PSEUDO_CODE: "Use pseudocode structure for algorithmic or logical explanations.",
            ResponseFormat.CODE_STRUCTURE: "Structure like code with indentation, comments, and logical blocks."
        }.get(self.response_format, "Standard-Formatierung.")

    def get_length_instructions(self) -> str:
        """Return the instruction text for the configured text length."""
        return {
            TextLength.MINI_CHAT: "Very short, concise answers (1–2 sentences, max 50 words). Chat style.",
            TextLength.CHAT_CONVERSATION: "Moderate conversation length (2–4 sentences, 50–150 words). Natural conversational style.",
            TextLength.TABLE_CONVERSATION: "Structured, tabular presentation with compact explanations (100–250 words).",
            TextLength.DETAILED_INDEPTH: "Comprehensive, detailed explanations (300–800 words) with depth and context.",
            TextLength.PHD_LEVEL: "Academic depth with extensive explanations (800+ words), references, and technical terminology."
        }.get(self.text_length, "Standard-Länge.")

    def get_combined_instructions(self) -> str:
        """Merge format, length, custom, and strictness instructions into one text."""
        parts = [
            "## Format-Anforderungen:",
            self.get_format_instructions(),
            "\n## Längen-Anforderungen:",
            self.get_length_instructions(),
        ]

        if self.custom_instructions:
            parts += ["\n## Zusätzliche Anweisungen:", self.custom_instructions]

        if self.strict_format_adherence:
            parts.append("\n## ATTENTION: STRICT FORMAT ADHERENCE REQUIRED!")

        return "\n".join(parts)

    def get_expected_word_range(self) -> tuple[int, int]:
        """Expected (min, max) word count used for quality evaluation."""
        return {
            TextLength.MINI_CHAT: (10, 50),
            TextLength.CHAT_CONVERSATION: (50, 150),
            TextLength.TABLE_CONVERSATION: (100, 250),
            TextLength.DETAILED_INDEPTH: (300, 800),
            TextLength.PHD_LEVEL: (800, 2000)
        }.get(self.text_length, (50, 200))
get_combined_instructions()

Kombiniere Format- und Längen-Anweisungen

Source code in toolboxv2/mods/isaa/base/Agent/types.py
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
def get_combined_instructions(self) -> str:
    """Merge format, length, custom, and strictness instructions into one text."""
    parts = [
        "## Format-Anforderungen:",
        self.get_format_instructions(),
        "\n## Längen-Anforderungen:",
        self.get_length_instructions(),
    ]

    if self.custom_instructions:
        parts += ["\n## Zusätzliche Anweisungen:", self.custom_instructions]

    if self.strict_format_adherence:
        parts.append("\n## ATTENTION: STRICT FORMAT ADHERENCE REQUIRED!")

    return "\n".join(parts)
get_expected_word_range()

Erwartete Wortanzahl für Qualitätsbewertung

Source code in toolboxv2/mods/isaa/base/Agent/types.py
411
412
413
414
415
416
417
418
419
420
def get_expected_word_range(self) -> tuple[int, int]:
    """Expected (min, max) word count used for quality evaluation."""
    return {
        TextLength.MINI_CHAT: (10, 50),
        TextLength.CHAT_CONVERSATION: (50, 150),
        TextLength.TABLE_CONVERSATION: (100, 250),
        TextLength.DETAILED_INDEPTH: (300, 800),
        TextLength.PHD_LEVEL: (800, 2000)
    }.get(self.text_length, (50, 200))
get_format_instructions()

Generiere Format-spezifische Anweisungen

Source code in toolboxv2/mods/isaa/base/Agent/types.py
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
def get_format_instructions(self) -> str:
    """Return the instruction text for the configured response format."""
    return {
        ResponseFormat.FREE_TEXT: "Use natural continuous text without special formatting.",
        ResponseFormat.WITH_TABLES: "Integrate tables for structured data representation. Use Markdown tables.",
        ResponseFormat.WITH_BULLET_POINTS: "Structure information with bullet points (•, -, *) for better readability.",
        ResponseFormat.WITH_LISTS: "Use numbered and unnumbered lists to organize content.",
        ResponseFormat.TEXT_ONLY: "Plain text only without formatting, symbols, or structural elements.",
        ResponseFormat.MD_TEXT: "Full Markdown formatting with headings, code blocks, links, etc.",
        ResponseFormat.YAML_TEXT: "Structure responses in YAML format for machine-readable output.",
        ResponseFormat.JSON_TEXT: "Format responses as a JSON structure for API integration.",
        ResponseFormat.PSEUDO_CODE: "Use pseudocode structure for algorithmic or logical explanations.",
        ResponseFormat.CODE_STRUCTURE: "Structure like code with indentation, comments, and logical blocks."
    }.get(self.response_format, "Standard-Formatierung.")
get_length_instructions()

Generiere Längen-spezifische Anweisungen

Source code in toolboxv2/mods/isaa/base/Agent/types.py
383
384
385
386
387
388
389
390
391
392
def get_length_instructions(self) -> str:
    """Return the instruction text for the configured text length."""
    return {
        TextLength.MINI_CHAT: "Very short, concise answers (1–2 sentences, max 50 words). Chat style.",
        TextLength.CHAT_CONVERSATION: "Moderate conversation length (2–4 sentences, 50–150 words). Natural conversational style.",
        TextLength.TABLE_CONVERSATION: "Structured, tabular presentation with compact explanations (100–250 words).",
        TextLength.DETAILED_INDEPTH: "Comprehensive, detailed explanations (300–800 words) with depth and context.",
        TextLength.PHD_LEVEL: "Academic depth with extensive explanations (800+ words), references, and technical terminology."
    }.get(self.text_length, "Standard-Länge.")
LLMTask dataclass

Bases: Task

Spezialisierter Task für LLM-Aufrufe

Source code in toolboxv2/mods/isaa/base/Agent/types.py
470
471
472
473
474
475
476
477
478
479
480
@dataclass
class LLMTask(Task):
    """Specialized task for LLM calls."""
    # Per-call LLM settings; model_preference selects the fast or complex model.
    llm_config: dict[str, Any] = field(default_factory=lambda: {
        "model_preference": "fast",  # "fast" | "complex"
        "temperature": 0.7,
        "max_tokens": 1024
    })
    prompt_template: str = ""  # template for the prompt sent to the LLM
    context_keys: list[str] = field(default_factory=list)  # keys read from shared state
    output_schema: dict | None = None  # JSON schema for validating the output (optional)
PersonaConfig dataclass
Source code in toolboxv2/mods/isaa/base/Agent/types.py
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
@dataclass
class PersonaConfig:
    """Persona definition applied to agent responses.

    The persona can be injected into the system prompt, applied as a
    post-processing step, or both (see `apply_method`).
    """
    name: str
    style: str = "professional"
    personality_traits: list[str] = field(default_factory=lambda: ["helpful", "concise"])
    tone: str = "friendly"
    response_format: str = "direct"
    custom_instructions: str = ""

    # FIX: annotation was `FormatConfig = None` — the field is optional.
    format_config: FormatConfig | None = None

    apply_method: str = "system_prompt"  # "system_prompt" | "post_process" | "both"
    integration_level: str = "light"  # "light" | "medium" | "heavy"

    def to_system_prompt_addition(self) -> str:
        """Convert persona to system prompt addition with format integration.

        Returns "" when the persona is not applied via the system prompt.
        """
        if self.apply_method not in ["system_prompt", "both"]:
            return ""

        additions = [
            f"You are {self.name}.",
            f"Your communication style is {self.style} with a {self.tone} tone.",
        ]

        if self.personality_traits:
            traits_str = ", ".join(self.personality_traits)
            additions.append(f"Your key traits are: {traits_str}.")

        if self.custom_instructions:
            additions.append(self.custom_instructions)

        # Append format-specific instructions when a format config is present.
        if self.format_config:
            additions.append("\n" + self.format_config.get_combined_instructions())

        return " ".join(additions)

    def update_format(self, response_format: ResponseFormat|str, text_length: TextLength|str, custom_instructions: str = ""):
        """Dynamically update the response format and text length.

        Accepts enum members or their string values; raises ValueError for
        unknown values, chaining the original conversion error.
        """
        # FIX: try narrowed to the enum conversions (the only lines that raise
        # the ValueError we translate), and the cause is chained via `from e`
        # so the original error is not swallowed.
        try:
            format_enum = ResponseFormat(response_format) if isinstance(response_format, str) else response_format
            length_enum = TextLength(text_length) if isinstance(text_length, str) else text_length
        except ValueError as e:
            raise ValueError(f"Invalid format '{response_format}' or length '{text_length}'") from e

        if not self.format_config:
            self.format_config = FormatConfig()

        self.format_config.response_format = format_enum
        self.format_config.text_length = length_enum

        if custom_instructions:
            self.format_config.custom_instructions = custom_instructions

    def should_post_process(self) -> bool:
        """Check if post-processing should be applied"""
        return self.apply_method in ["post_process", "both"]
should_post_process()

Check if post-processing should be applied

Source code in toolboxv2/mods/isaa/base/Agent/types.py
751
752
753
def should_post_process(self) -> bool:
    """Return True when the persona is applied as a post-processing step
    ("post_process" or "both")."""
    return self.apply_method in ("post_process", "both")
to_system_prompt_addition()

Convert persona to system prompt addition with format integration

Source code in toolboxv2/mods/isaa/base/Agent/types.py
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
def to_system_prompt_addition(self) -> str:
    """Render the persona as a system-prompt addition, including format
    instructions when a format config is set. Returns "" when the persona
    is not applied via the system prompt."""
    if self.apply_method not in ["system_prompt", "both"]:
        return ""

    parts = [
        f"You are {self.name}.",
        f"Your communication style is {self.style} with a {self.tone} tone.",
    ]

    if self.personality_traits:
        parts.append(f"Your key traits are: {', '.join(self.personality_traits)}.")

    if self.custom_instructions:
        parts.append(self.custom_instructions)

    # Append format-specific instructions when a format config is present.
    if self.format_config:
        parts.append("\n" + self.format_config.get_combined_instructions())

    return " ".join(parts)
update_format(response_format, text_length, custom_instructions='')

Dynamische Format-Aktualisierung

Source code in toolboxv2/mods/isaa/base/Agent/types.py
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
def update_format(self, response_format: ResponseFormat|str, text_length: TextLength|str, custom_instructions: str = ""):
    """Dynamically update the response format and text length.

    Accepts enum members or their string values; raises ValueError for
    unknown values, chaining the original conversion error.
    """
    # FIX: try narrowed to the enum conversions (the only lines that raise
    # the ValueError we translate), and the cause is chained via `from e`
    # so the original error is not swallowed.
    try:
        format_enum = ResponseFormat(response_format) if isinstance(response_format, str) else response_format
        length_enum = TextLength(text_length) if isinstance(text_length, str) else text_length
    except ValueError as e:
        raise ValueError(f"Invalid format '{response_format}' or length '{text_length}'") from e

    if not self.format_config:
        self.format_config = FormatConfig()

    self.format_config.response_format = format_enum
    self.format_config.text_length = length_enum

    if custom_instructions:
        self.format_config.custom_instructions = custom_instructions
ProgressEvent dataclass

Enhanced progress event with better error handling

Source code in toolboxv2/mods/isaa/base/Agent/types.py
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
@dataclass
class ProgressEvent:
    """Enhanced progress event with better error handling.

    A single serializable record of something the agent did (LLM call, tool
    call, node transition, ...). Only `event_type` and `node_name` are
    required; the remaining attribute groups are filled per event kind.
    """

    # === 1. Core attributes (every event) ===
    event_type: str
    node_name: str
    timestamp: float = field(default_factory=time.time)
    event_id: str = field(default_factory=lambda: str(uuid.uuid4()))
    session_id: Optional[str] = None

    # === 2. Status and result attributes ===
    status: Optional[NodeStatus] = None
    success: Optional[bool] = None
    duration: Optional[float] = None
    error_details: dict[str, Any] = field(default_factory=dict)  # structured: message, type, traceback

    # === 3. LLM-specific attributes ===
    llm_model: Optional[str] = None
    llm_prompt_tokens: Optional[int] = None
    llm_completion_tokens: Optional[int] = None
    llm_total_tokens: Optional[int] = None
    llm_cost: Optional[float] = None
    llm_input: Optional[Any] = None  # optional, for debugging; can be large
    llm_output: Optional[str] = None  # optional, for debugging; can be large

    # === 4. Tool-specific attributes ===
    tool_name: Optional[str] = None
    is_meta_tool: Optional[bool] = None
    tool_args: Optional[dict[str, Any]] = None
    tool_result: Optional[Any] = None
    tool_error: Optional[str] = None
    llm_temperature: Optional[float] = None

    # === 5. Strategy and context attributes ===
    agent_name: Optional[str] = None
    task_id: Optional[str] = None
    plan_id: Optional[str] = None

    # Node/routing data
    routing_decision: Optional[str] = None
    node_phase: Optional[str] = None
    node_duration: Optional[float] = None

    # === 6. Metadata (everything else) ===
    metadata: dict[str, Any] = field(default_factory=dict)

    def __post_init__(self):
        """Normalize explicitly-passed None fields and derive status/success
        from error info found in metadata."""
        if self.timestamp is None:
            self.timestamp = time.time()

        if self.metadata is None:
            self.metadata = {}
        if not self.event_id:
            self.event_id = f"{self.node_name}_{self.event_type}_{int(self.timestamp * 1000000)}"
        # Mirror error info from metadata into the structured error_details
        # and force the event into a failed state.
        if 'error' in self.metadata or 'error_type' in self.metadata:
            if self.error_details is None:
                self.error_details = {}
            self.error_details['error'] = self.metadata.get('error')
            self.error_details['error_type'] = self.metadata.get('error_type')
            self.status = NodeStatus.FAILED
        if self.status == NodeStatus.FAILED:
            self.success = False
        if self.status == NodeStatus.COMPLETED:
            self.success = True

    def _to_dict(self) -> dict[str, Any]:
        """Convert ProgressEvent to dictionary with proper handling of all field types."""
        result = {}

        # Walk all dataclass fields. FIX: loop variable renamed from `field`
        # to `f` so it no longer shadows dataclasses.field.
        for f in fields(self):
            value = getattr(self, f.name)

            # Handle None values
            if value is None:
                result[f.name] = None
                continue

            # Handle NodeStatus / any enum: serialize to its .value
            if isinstance(value, NodeStatus | Enum):
                result[f.name] = value.value
            # Handle dataclass objects
            elif is_dataclass(value):
                result[f.name] = asdict(value)
            # Handle dictionaries (recursively process nested enums/dataclasses)
            elif isinstance(value, dict):
                result[f.name] = self._process_dict(value)
            # Handle lists (recursively process nested items)
            elif isinstance(value, list):
                result[f.name] = self._process_list(value)
            # Handle primitive types
            else:
                result[f.name] = value

        return result

    def _process_dict(self, d: dict[str, Any]) -> dict[str, Any]:
        """Recursively serialize dictionary values (enums, dataclasses, containers)."""
        result = {}
        for k, v in d.items():
            if isinstance(v, Enum):
                result[k] = v.value
            elif is_dataclass(v):
                result[k] = asdict(v)
            elif isinstance(v, dict):
                result[k] = self._process_dict(v)
            elif isinstance(v, list):
                result[k] = self._process_list(v)
            else:
                result[k] = v
        return result

    def _process_list(self, lst: list[Any]) -> list[Any]:
        """Recursively serialize list items (enums, dataclasses, containers)."""
        result = []
        for item in lst:
            if isinstance(item, Enum):
                result.append(item.value)
            elif is_dataclass(item):
                result.append(asdict(item))
            elif isinstance(item, dict):
                result.append(self._process_dict(item))
            elif isinstance(item, list):
                result.append(self._process_list(item))
            else:
                result.append(item)
        return result

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> 'ProgressEvent':
        """Create ProgressEvent from dictionary (inverse of _to_dict)."""
        # Create a copy to avoid modifying the original
        data_copy = dict(data)

        # Handle NodeStatus enum conversion from string back to enum
        if 'status' in data_copy and data_copy['status'] is not None:
            if isinstance(data_copy['status'], str):
                try:
                    data_copy['status'] = NodeStatus(data_copy['status'])
                except (ValueError, TypeError):
                    # If invalid status value, set to None
                    data_copy['status'] = None

        # Filter out any keys that aren't valid dataclass fields.
        # FIX: comprehension variable renamed so it doesn't shadow dataclasses.field.
        field_names = {f.name for f in fields(cls)}
        filtered_data = {k: v for k, v in data_copy.items() if k in field_names}

        # Ensure metadata is properly initialized
        if 'metadata' not in filtered_data or filtered_data['metadata'] is None:
            filtered_data['metadata'] = {}

        return cls(**filtered_data)

    def to_dict(self) -> dict[str, Any]:
        """Return event data with None/empty values removed for compact display."""
        data = self._to_dict()

        def clean_dict(d):
            if isinstance(d, dict):
                return {k: clean_dict(v) for k, v in d.items()
                        if v is not None and v != {} and v != [] and v != ''}
            elif isinstance(d, list):
                cleaned_list = [clean_dict(item) for item in d if item is not None]
                return [item for item in cleaned_list if item != {} and item != []]
            return d

        return clean_dict(data)

    def filter_none_values(self) -> dict[str, Any]:
        """Compact serialized event with None/empty values removed.

        BUG FIX: the display helpers below call this method, but it was never
        defined anywhere in the class, so they raised AttributeError at
        runtime. It is the same compact representation as to_dict().
        """
        return self.to_dict()

    def get_chat_display_data(self) -> dict[str, Any]:
        """Get data optimized for chat view display."""
        filtered = self.filter_none_values()

        # Core fields always shown
        core_data = {
            'event_type': filtered.get('event_type'),
            'node_name': filtered.get('node_name'),
            'timestamp': filtered.get('timestamp'),
            'event_id': filtered.get('event_id'),
            'status': filtered.get('status')
        }

        # Add specific fields based on event type
        if self.event_type == 'outline_created':
            if 'metadata' in filtered:
                core_data['outline_steps'] = len(filtered['metadata'].get('outline', []))
        elif self.event_type == 'reasoning_loop':
            if 'metadata' in filtered:
                core_data.update({
                    'loop_number': filtered['metadata'].get('loop_number'),
                    'outline_step': filtered['metadata'].get('outline_step'),
                    'context_size': filtered['metadata'].get('context_size')
                })
        elif self.event_type == 'tool_call':
            core_data.update({
                'tool_name': filtered.get('tool_name'),
                'is_meta_tool': filtered.get('is_meta_tool')
            })
        elif self.event_type == 'llm_call':
            core_data.update({
                'llm_model': filtered.get('llm_model'),
                'llm_total_tokens': filtered.get('llm_total_tokens'),
                'llm_cost': filtered.get('llm_cost')
            })

        # Remove None values from core_data
        return {k: v for k, v in core_data.items() if v is not None}

    def get_detailed_display_data(self) -> dict[str, Any]:
        """Get complete filtered data for detailed popup view."""
        return self.filter_none_values()

    def get_progress_summary(self) -> str:
        """Get a brief summary for progress sidebar."""
        if self.event_type == 'reasoning_loop' and 'metadata' in self.filter_none_values():
            metadata = self.filter_none_values()['metadata']
            loop_num = metadata.get('loop_number', '?')
            step = metadata.get('outline_step', '?')
            return f"Loop {loop_num}, Step {step}"
        elif self.event_type == 'tool_call':
            tool_name = self.tool_name or 'Unknown Tool'
            return f"{'Meta ' if self.is_meta_tool else ''}{tool_name}"
        elif self.event_type == 'llm_call':
            model = self.llm_model or 'Unknown Model'
            tokens = self.llm_total_tokens
            return f"{model} ({tokens} tokens)" if tokens else model
        else:
            return self.event_type.replace('_', ' ').title()
from_dict(data) classmethod

Create ProgressEvent from dictionary

Source code in toolboxv2/mods/isaa/base/Agent/types.py
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
@classmethod
def from_dict(cls, data: dict[str, Any]) -> 'ProgressEvent':
    """Build a ProgressEvent from a plain dictionary (inverse of _to_dict)."""
    payload = dict(data)  # shallow copy; leave the caller's dict untouched

    # Convert a string status back into a NodeStatus member; unknown or
    # malformed values degrade to None instead of raising.
    raw_status = payload.get('status')
    if isinstance(raw_status, str):
        try:
            payload['status'] = NodeStatus(raw_status)
        except (ValueError, TypeError):
            payload['status'] = None

    # Keep only keys that are actual dataclass fields so cls(**...) cannot fail.
    valid_names = {f.name for f in fields(cls)}
    kwargs = {k: v for k, v in payload.items() if k in valid_names}

    # metadata must always be a dict (absent or None both normalize to {}).
    if kwargs.get('metadata') is None:
        kwargs['metadata'] = {}

    return cls(**kwargs)
get_chat_display_data()

Get data optimized for chat view display

Source code in toolboxv2/mods/isaa/base/Agent/types.py
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
def get_chat_display_data(self) -> dict[str, Any]:
    """Assemble the compact event payload shown in the chat view."""
    data = self.filter_none_values()
    meta = data.get('metadata', {})

    # Core fields shown for every event kind.
    display = {
        'event_type': data.get('event_type'),
        'node_name': data.get('node_name'),
        'timestamp': data.get('timestamp'),
        'event_id': data.get('event_id'),
        'status': data.get('status'),
    }

    # Event-type-specific extras.
    kind = self.event_type
    if kind == 'outline_created' and 'metadata' in data:
        display['outline_steps'] = len(meta.get('outline', []))
    elif kind == 'reasoning_loop' and 'metadata' in data:
        display['loop_number'] = meta.get('loop_number')
        display['outline_step'] = meta.get('outline_step')
        display['context_size'] = meta.get('context_size')
    elif kind == 'tool_call':
        display['tool_name'] = data.get('tool_name')
        display['is_meta_tool'] = data.get('is_meta_tool')
    elif kind == 'llm_call':
        display['llm_model'] = data.get('llm_model')
        display['llm_total_tokens'] = data.get('llm_total_tokens')
        display['llm_cost'] = data.get('llm_cost')

    # Drop keys whose value is None.
    return {k: v for k, v in display.items() if v is not None}
get_detailed_display_data()

Get complete filtered data for detailed popup view

Source code in toolboxv2/mods/isaa/base/Agent/types.py
263
264
265
def get_detailed_display_data(self) -> dict[str, Any]:
    """Full None-filtered event payload for the detailed popup view."""
    detailed = self.filter_none_values()
    return detailed
get_progress_summary()

Get a brief summary for progress sidebar

Source code in toolboxv2/mods/isaa/base/Agent/types.py
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
def get_progress_summary(self) -> str:
    """One-line summary of this event for the progress sidebar."""
    kind = self.event_type

    if kind == 'reasoning_loop':
        data = self.filter_none_values()
        if 'metadata' in data:
            meta = data['metadata']
            return f"Loop {meta.get('loop_number', '?')}, Step {meta.get('outline_step', '?')}"
    elif kind == 'tool_call':
        prefix = 'Meta ' if self.is_meta_tool else ''
        return f"{prefix}{self.tool_name or 'Unknown Tool'}"
    elif kind == 'llm_call':
        model = self.llm_model or 'Unknown Model'
        tokens = self.llm_total_tokens
        return f"{model} ({tokens} tokens)" if tokens else model

    # Default: humanize the raw event type, e.g. "node_enter" -> "Node Enter".
    return kind.replace('_', ' ').title()
to_dict()

Return event data with None values removed for compact display

Source code in toolboxv2/mods/isaa/base/Agent/types.py
209
210
211
212
213
214
215
216
217
218
219
220
221
222
def to_dict(self) -> dict[str, Any]:
    """Return event data with None/empty values removed for compact display."""

    def _strip(value):
        # Dicts: drop entries whose (pre-strip) value is None/empty, recurse.
        if isinstance(value, dict):
            return {k: _strip(v) for k, v in value.items()
                    if v is not None and v != {} and v != [] and v != ''}
        # Lists: drop Nones, recurse, then drop empty containers.
        if isinstance(value, list):
            stripped = [_strip(item) for item in value if item is not None]
            return [item for item in stripped if item != {} and item != []]
        return value

    return _strip(self._to_dict())
ProgressTracker

Advanced progress tracking with cost calculation

Source code in toolboxv2/mods/isaa/base/Agent/types.py
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
class ProgressTracker:
    """Advanced progress tracking with cost calculation.

    Collects ProgressEvent instances, forwards each one to an optional
    (sync or async) callback, times named operations and estimates LLM
    token costs.
    """

    def __init__(self, progress_callback: callable = None, agent_name: str = "unknown"):
        self.progress_callback = progress_callback
        self.events: list[ProgressEvent] = []
        self.active_timers: dict[str, float] = {}

        # Fallback per-token prices used when litellm cannot provide a cost.
        # (Simplified - real provider pricing varies per model.)
        self.token_costs = {
            "input": 0.00001,  # $0.01/1K tokens input
            "output": 0.00003,  # $0.03/1K tokens output
        }
        self.agent_name = agent_name

    async def emit_event(self, event: ProgressEvent):
        """Store *event*, tag it with this tracker's agent name and invoke the callback."""
        event.agent_name = self.agent_name
        self.events.append(event)

        if self.progress_callback:
            try:
                if asyncio.iscoroutinefunction(self.progress_callback):
                    await self.progress_callback(event)
                else:
                    self.progress_callback(event)
            except Exception:
                # A faulty callback must never break the caller's loop;
                # report the traceback and continue.
                import traceback
                print(traceback.format_exc())

    def start_timer(self, key: str) -> float:
        """Start timing the operation identified by *key*; return the start time."""
        start_time = time.perf_counter()
        self.active_timers[key] = start_time
        return start_time

    def end_timer(self, key: str) -> float:
        """Stop the timer for *key* and return elapsed seconds (0.0 if never started)."""
        if key not in self.active_timers:
            return 0.0
        duration = time.perf_counter() - self.active_timers[key]
        del self.active_timers[key]
        return duration

    def calculate_llm_cost(self, model: str, input_tokens: int, output_tokens: int,
                           completion_response: Any = None) -> float:
        """Calculate approximate LLM cost in USD.

        Prefers litellm's provider-aware pricing. Falls back to the flat
        per-token estimate in ``self.token_costs`` when litellm is missing
        OR when it cannot price the model/response (litellm raises for
        unknown models, which the previous ImportError-only handler let
        propagate).
        """
        try:
            import litellm
            return litellm.completion_cost(model=model, completion_response=completion_response)
        except Exception:
            # ImportError or litellm pricing failure - use the simple estimate.
            pass
        input_cost = (input_tokens / 1000) * self.token_costs["input"]
        output_cost = (output_tokens / 1000) * self.token_costs["output"]
        return input_cost + output_cost

    def get_summary(self) -> dict[str, Any]:
        """Aggregate all recorded events into a single summary dict."""
        summary = {
            "total_events": len(self.events),
            "llm_calls": len([e for e in self.events if e.event_type == "llm_call"]),
            "tool_calls": len([e for e in self.events if e.event_type == "tool_call"]),
            "total_cost": sum(e.llm_cost for e in self.events if e.llm_cost),
            "total_tokens": sum(e.llm_total_tokens for e in self.events if e.llm_total_tokens),
            "total_duration": sum(e.node_duration for e in self.events if e.node_duration),
            # Filter falsy names for consistency with tools_used/models_used
            # (previously a None node_name leaked into the list).
            "nodes_visited": list({e.node_name for e in self.events if e.node_name}),
            "tools_used": list({e.tool_name for e in self.events if e.tool_name}),
            "models_used": list({e.llm_model for e in self.events if e.llm_model})
        }
        return summary
calculate_llm_cost(model, input_tokens, output_tokens, completion_response=None)

Calculate approximate LLM cost

Source code in toolboxv2/mods/isaa/base/Agent/types.py
329
330
331
332
333
334
335
336
337
338
339
340
def calculate_llm_cost(self, model: str, input_tokens: int, output_tokens: int,
                       completion_response: Any = None) -> float:
    """Calculate approximate LLM cost in USD.

    Prefers litellm's provider-aware pricing; falls back to the flat
    per-token rates in ``self.token_costs`` when litellm is unavailable
    OR when it cannot price the model/response (litellm raises for
    unknown models - the previous ImportError-only handler let that
    exception propagate instead of falling back).
    """
    try:
        import litellm
        return litellm.completion_cost(model=model, completion_response=completion_response)
    except Exception:
        # ImportError or litellm pricing failure - use the simple estimate.
        pass
    input_cost = (input_tokens / 1000) * self.token_costs["input"]
    output_cost = (output_tokens / 1000) * self.token_costs["output"]
    return input_cost + output_cost
emit_event(event) async

Emit progress event with callback and storage

Source code in toolboxv2/mods/isaa/base/Agent/types.py
299
300
301
302
303
304
305
306
307
308
309
310
311
312
async def emit_event(self, event: ProgressEvent):
    """Emit progress event with callback and storage"""
    self.events.append(event)
    event.agent_name = self.agent_name

    callback = self.progress_callback
    if not callback:
        return
    try:
        if asyncio.iscoroutinefunction(callback):
            await callback(event)
        else:
            callback(event)
    except Exception:
        # Never let a broken callback interrupt event emission.
        import traceback
        print(traceback.format_exc())
end_timer(key)

End timing operation and return duration

Source code in toolboxv2/mods/isaa/base/Agent/types.py
321
322
323
324
325
326
327
def end_timer(self, key: str) -> float:
    """End timing operation and return duration"""
    if key not in self.active_timers:
        return 0.0
    duration = time.perf_counter() - self.active_timers[key]
    del self.active_timers[key]
    return duration
get_summary()

Get comprehensive progress summary

Source code in toolboxv2/mods/isaa/base/Agent/types.py
342
343
344
345
346
347
348
349
350
351
352
353
354
355
def get_summary(self) -> dict[str, Any]:
    """Aggregate every recorded event into a single summary dict."""
    events = self.events
    return {
        "total_events": len(events),
        "llm_calls": sum(1 for e in events if e.event_type == "llm_call"),
        "tool_calls": sum(1 for e in events if e.event_type == "tool_call"),
        "total_cost": sum(e.llm_cost for e in events if e.llm_cost),
        "total_tokens": sum(e.llm_total_tokens for e in events if e.llm_total_tokens),
        "total_duration": sum(e.node_duration for e in events if e.node_duration),
        "nodes_visited": list({e.node_name for e in events}),
        "tools_used": list({e.tool_name for e in events if e.tool_name}),
        "models_used": list({e.llm_model for e in events if e.llm_model}),
    }
start_timer(key)

Start timing operation

Source code in toolboxv2/mods/isaa/base/Agent/types.py
315
316
317
318
319
def start_timer(self, key: str) -> float:
    """Start timing operation"""
    start_time = time.perf_counter()
    self.active_timers[key] = start_time
    return start_time
Task dataclass
Source code in toolboxv2/mods/isaa/base/Agent/types.py
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
@dataclass
class Task:
    id: str
    type: str
    description: str
    status: str = "pending"  # pending, running, completed, failed, paused
    priority: int = 1
    dependencies: list[str] = field(default_factory=list)
    subtasks: list[str] = field(default_factory=list)
    result: Any = None
    error: str = None
    created_at: datetime = field(default_factory=datetime.now)
    started_at: datetime  = None
    completed_at: datetime  = None
    metadata: dict[str, Any] = field(default_factory=dict)
    retry_count: int = 0
    max_retries: int = 3
    critical: bool = False

    task_identification_attr: bool = True


    def __post_init__(self):
        """Ensure all mutable defaults are properly initialized"""
        if self.metadata is None:
            self.metadata = {}
        if self.dependencies is None:
            self.dependencies = []
        if self.subtasks is None:
            self.subtasks = []

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        setattr(self, key, value)
__post_init__()

Ensure all mutable defaults are properly initialized

Source code in toolboxv2/mods/isaa/base/Agent/types.py
444
445
446
447
448
449
450
451
def __post_init__(self):
    """Ensure all mutable defaults are properly initialized"""
    if self.metadata is None:
        self.metadata = {}
    if self.dependencies is None:
        self.dependencies = []
    if self.subtasks is None:
        self.subtasks = []
ToolAnalysis

Bases: BaseModel

Defines the structure for a valid tool analysis.

Source code in toolboxv2/mods/isaa/base/Agent/types.py
782
783
784
785
786
787
788
789
790
791
792
class ToolAnalysis(BaseModel):
    """Defines the structure for a valid tool analysis."""
    # All fields are required (Field(...)); only args_schema's VALUE may be None.
    primary_function: str = Field(..., description="The main purpose of the tool.")
    use_cases: list[str] = Field(..., description="Specific use cases for the tool.")
    trigger_phrases: list[str] = Field(..., description="Phrases that should trigger the tool.")
    indirect_connections: list[str] = Field(..., description="Non-obvious connections or applications.")
    complexity_scenarios: list[str] = Field(..., description="Complex scenarios where the tool can be applied.")
    user_intent_categories: list[str] = Field(..., description="Categories of user intent the tool addresses.")
    # Maps trigger phrase -> confidence score; presumably in [0, 1] - TODO confirm.
    confidence_triggers: dict[str, float] = Field(..., description="Phrases mapped to confidence scores.")
    tool_complexity: str = Field(..., description="The complexity of the tool, rated as low, medium, or high.")
    args_schema: dict[str, Any] | None = Field(..., description="The schema for the tool's arguments.")
ToolTask dataclass

Bases: Task

Specialized task for tool calls

Source code in toolboxv2/mods/isaa/base/Agent/types.py
483
484
485
486
487
488
489
490
@dataclass
class ToolTask(Task):
    """Specialized task for tool calls."""
    tool_name: str = ""
    arguments: dict[str, Any] = field(default_factory=dict)  # may contain {{ }} references
    hypothesis: str = ""  # what do we expect from this tool?
    validation_criteria: str = ""  # how do we validate the result?
    expectation: str = ""  # what should the result look like?
create_task(task_type, **kwargs)

Factory for creating a task of the correct concrete type

Source code in toolboxv2/mods/isaa/base/Agent/types.py
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
def create_task(task_type: str, **kwargs) -> Task:
    """Factory creating a task instance of the concrete type matching *task_type*."""
    registry = {
        "llm_call": LLMTask,
        "tool_call": ToolTask,
        "decision": DecisionTask,
        "generic": Task,
        "LLMTask": LLMTask,
        "ToolTask": ToolTask,
        "DecisionTask": DecisionTask,
        "Task": Task,
    }
    concrete = registry.get(task_type, Task)

    # Fill in standard fields when the caller did not supply them.
    if "id" not in kwargs:
        kwargs["id"] = str(uuid.uuid4())
    kwargs.setdefault("type", task_type)
    kwargs.setdefault("critical", task_type in ("llm_call", "decision"))
    kwargs.setdefault("metadata", {})

    task = concrete(**kwargs)

    # Defensive: guarantee metadata exists even if __post_init__ was bypassed.
    if getattr(task, "metadata", None) is None:
        task.metadata = {}

    return task
utils
LLMMessage dataclass

Represents a message in a conversation with the LLM.

Source code in toolboxv2/mods/isaa/base/Agent/utils.py
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
@dataclass
class LLMMessage:
    """Represents a message in a conversation with the LLM."""
    role: str  # "user", "assistant", "system", "tool"
    # Content can be string or list (e.g., multimodal with text/image dicts)
    # Conforms to LiteLLM/OpenAI structure
    content: str | list[dict[str, Any]]
    tool_call_id: str | None = None  # For tool responses
    name: str | None = None  # For tool calls/responses (function name)

    def to_dict(self) -> dict:
        """Convert to dictionary, handling potential dataclass nuances."""
        d = {"role": self.role, "content": self.content}
        if self.tool_call_id:
            d["tool_call_id"] = self.tool_call_id
        if self.name:
            d["name"] = self.name
        return d
to_dict()

Convert to dictionary, handling potential dataclass nuances.

Source code in toolboxv2/mods/isaa/base/Agent/utils.py
144
145
146
147
148
149
150
151
def to_dict(self) -> dict:
    """Convert to dictionary, handling potential dataclass nuances."""
    payload = {"role": self.role, "content": self.content}
    # Optional fields are included only when truthy, matching the
    # OpenAI-style message schema.
    for optional in ("tool_call_id", "name"):
        value = getattr(self, optional)
        if value:
            payload[optional] = value
    return payload
WorldModel dataclass

Thread-safe representation of the agent's persistent understanding of the world.

Source code in toolboxv2/mods/isaa/base/Agent/utils.py
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
@dataclass
class WorldModel:
    """Thread-safe representation of the agent's persistent understanding of the world."""
    data: dict[str, Any] = dataclass_field(default_factory=dict)
    _lock: threading.Lock = dataclass_field(default_factory=threading.Lock)

    def get(self, key: str, default: Any = None) -> Any:
        """Return the stored value for *key*, or *default* when absent."""
        with self._lock:
            return self.data.get(key, default)

    def set(self, key: str, value: Any):
        """Store *value* under *key*."""
        with self._lock:
            logger_wm.debug(f"WorldModel SET: {key} = {value}")
            self.data[key] = value

    def remove(self, key: str):
        """Delete *key* if present; silently ignore unknown keys."""
        with self._lock:
            if key in self.data:
                logger_wm.debug(f"WorldModel REMOVE: {key}")
                del self.data[key]

    def show(self) -> str:
        """Render the model as one '- key: value' line per entry."""
        with self._lock:
            if not self.data:
                return "[empty]"
            try:
                items = [f"- {k}: {json.dumps(v, indent=None, ensure_ascii=False, default=str)}"
                         for k, v in self.data.items()]
            except Exception:
                # Fall back to plain str() when a value resists JSON encoding.
                items = [f"- {k}: {str(v)}" for k, v in self.data.items()]
            return "\n".join(items)

    def to_dict(self) -> dict[str, Any]:
        """Return a snapshot of the data."""
        with self._lock:
            # Shallow copy: nested mutable values are still shared.
            return self.data.copy()

    def update_from_dict(self, data_dict: dict[str, Any]):
        """Merge *data_dict* into the model, overwriting existing keys."""
        with self._lock:
            self.data.update(data_dict)
            logger_wm.debug(f"WorldModel updated from dict: {list(data_dict.keys())}")
AgentUtils
AISemanticMemory
Source code in toolboxv2/mods/isaa/base/AgentUtils.py
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
class AISemanticMemory(metaclass=Singleton):
    """Process-wide registry of named semantic memory stores.

    Each store is a KnowledgeBase; this class handles name sanitization,
    lazy creation, data ingestion, querying and (de)serialization.
    """

    def __init__(self,
                 base_path: str = "/semantic_memory",
                 default_model: str = os.getenv("BLITZMODEL"),
                 default_embedding_model: str = os.getenv("DEFAULTMODELEMBEDDING"),
                 default_similarity_threshold: float = 0.61,
                 default_batch_size: int = 64,
                 default_n_clusters: int = 2,
                 default_deduplication_threshold: float = 0.85):
        """
        Initialize AISemanticMemory with KnowledgeBase integration

        Args:
            base_path: Root directory for memory storage
            default_model: Default model for text generation
            default_embedding_model: Default embedding model
            default_similarity_threshold: Default similarity threshold for retrieval
            default_batch_size: Default batch size for processing
            default_n_clusters: Default number of clusters for FAISS
            default_deduplication_threshold: Default threshold for deduplication
        """
        # BUGFIX: os.path.join discards every preceding component when given
        # an absolute path, so the default "/semantic_memory" resolved to the
        # filesystem root instead of <cwd>/.data/semantic_memory.
        self.base_path = os.path.join(os.getcwd(), ".data", base_path.lstrip("/\\"))
        self.memories: dict[str, KnowledgeBase] = {}

        # Map of embedding models to their dimensions
        self.embedding_dims = {
            "text-embedding-3-small": 1536,
            "text-embedding-3-large": 3072,
            "nomic-embed-text": 768,
            "default": 768
        }

        self.default_config = {
            "embedding_model": default_embedding_model,
            "embedding_dim": self._get_embedding_dim(default_embedding_model),
            "similarity_threshold": default_similarity_threshold,
            "batch_size": default_batch_size,
            "n_clusters": default_n_clusters,
            "deduplication_threshold": default_deduplication_threshold,
            "model_name": default_model
        }

    def _get_embedding_dim(self, model_name: str) -> int:
        """Get embedding dimension for a model (768 for unknown models)."""
        return self.embedding_dims.get(model_name, 768)

    @staticmethod
    def _sanitize_name(name: str) -> str:
        """Sanitize memory name for filesystem safety"""
        # Keep alphanumerics plus "_"/"-", cap at 63 chars, pad to >= 3 chars.
        name = re.sub(r'[^a-zA-Z0-9_-]', '-', name)[:63].strip('-')
        if not name:
            raise ValueError("Invalid memory name")
        if len(name) < 3:
            name += "Z" * (3 - len(name))
        return name

    def create_memory(self,
                      name: str,
                      model_config: dict | None = None,
                      storage_config: dict | None = None) -> KnowledgeBase:
        """
        Create new memory store with KnowledgeBase

        Args:
            name: Unique name for the memory store
            model_config: Configuration for embedding model
            storage_config: Configuration for KnowledgeBase parameters

        Raises:
            ValueError: If a store with this (sanitized) name already exists.
        """
        sanitized = self._sanitize_name(name)
        if sanitized in self.memories:
            raise ValueError(f"Memory '{name}' already exists")

        # Determine embedding model and dimension
        embedding_model = self.default_config["embedding_model"]
        model_name = self.default_config["model_name"]
        if model_config:
            embedding_model = model_config.get("embedding_model", embedding_model)
            model_name = model_config.get("model_name", model_name)
        embedding_dim = self._get_embedding_dim(embedding_model)

        # Base KnowledgeBase parameters from defaults
        kb_params = {
            "embedding_dim": embedding_dim,
            "embedding_model": embedding_model,
            "similarity_threshold": self.default_config["similarity_threshold"],
            "batch_size": self.default_config["batch_size"],
            "n_clusters": self.default_config["n_clusters"],
            "deduplication_threshold": self.default_config["deduplication_threshold"],
            "model_name": model_name,
        }

        # Per-store overrides
        if storage_config:
            kb_params.update({
                "similarity_threshold": storage_config.get("similarity_threshold", kb_params["similarity_threshold"]),
                "batch_size": storage_config.get("batch_size", kb_params["batch_size"]),
                "n_clusters": storage_config.get("n_clusters", kb_params["n_clusters"]),
                "model_name": storage_config.get("model_name", kb_params["model_name"]),
                "embedding_model": storage_config.get("embedding_model", kb_params["embedding_model"]),
                "deduplication_threshold": storage_config.get("deduplication_threshold",
                                                              kb_params["deduplication_threshold"]),
            })

        self.memories[sanitized] = KnowledgeBase(**kb_params)
        return self.memories[sanitized]

    async def add_data(self,
                       memory_name: str,
                       data: str | list[str] | bytes | dict,
                       metadata: dict | None = None, direct=False) -> bool:
        """
        Add data to memory store

        Args:
            memory_name: Target memory store (created lazily if missing)
            data: Text, list of texts, binary file, or structured data
            metadata: Optional metadata

        Returns:
            True if at least one entry was added.

        Raises:
            ValueError: On file-extraction failure or unsupported data type.
            NotImplementedError: For dict (knowledge-graph) input.
            RuntimeError: When the KnowledgeBase insert fails.
        """
        name = self._sanitize_name(memory_name)
        kb = self.memories.get(name)
        if not kb:
            kb = self.create_memory(name)

        # Normalise input into a list of tab-free strings
        texts = []
        if isinstance(data, bytes):
            try:
                text = extract_text_natively(data, filename="" if metadata is None else metadata.get("filename", ""))
                texts = [text.replace('\\t', '').replace('\t', '')]
            except Exception as e:
                raise ValueError(f"File processing failed: {str(e)}")
        elif isinstance(data, str):
            texts = [data.replace('\\t', '').replace('\t', '')]
        elif isinstance(data, list):
            texts = [d.replace('\\t', '').replace('\t', '') for d in data]
        elif isinstance(data, dict):
            # Custom KG not supported in current KnowledgeBase
            raise NotImplementedError("Custom knowledge graph insertion not supported")
        else:
            raise ValueError("Unsupported data type")

        try:
            added, duplicates = await kb.add_data(texts, metadata, direct=direct)
            return added > 0
        except Exception as e:
            import traceback
            print(traceback.format_exc())
            raise RuntimeError(f"Data addition failed: {str(e)}")

    def get(self, names):
        """Return the KnowledgeBase objects matching *names* (all stores when None)."""
        return [m for _n, m in self._get_target_memories(names)]

    async def query(self,
                    query: str,
                    memory_names: str | list[str] | None = None,
                    query_params: dict | None = None,
                    to_str: bool = False,
                    unified_retrieve: bool = False) -> str | list[dict]:
        """
        Query memories using KnowledgeBase retrieval

        Args:
            query: Search query
            memory_names: Target memory names (all stores when None)
            query_params: Query parameters (k, min_similarity, ...)
            to_str: Return string format
            unified_retrieve: Use unified_retrieve instead of retrieve_with_overview
        """
        targets = self._get_target_memories(memory_names)
        if not targets:
            return []

        params = query_params or {}
        results = []
        for name, kb in targets:
            # NOTE(review): retrieval errors intentionally propagate; the old
            # per-store try/except was disabled (commented out) upstream.
            if unified_retrieve:
                result = await kb.unified_retrieve(
                    query=query,
                    k=params.get("k", 2),
                    min_similarity=params.get("min_similarity", 0.2),
                    cross_ref_depth=params.get("cross_ref_depth", 2),
                    max_cross_refs=params.get("max_cross_refs", 6),
                    max_sentences=params.get("max_sentences", 12)
                )
            else:
                result = await kb.retrieve_with_overview(
                    query=query,
                    k=params.get("k", 3),
                    min_similarity=params.get("min_similarity", 0.2),
                    cross_ref_depth=params.get("cross_ref_depth", 2),
                    max_cross_refs=params.get("max_cross_refs", 2),
                    max_sentences=params.get("max_sentences", 5)
                )
            results.append({
                "memory": name,
                "result": result
            })

        if to_str:
            if not unified_retrieve:
                str_res = [
                    f"{x['memory']} - {json.dumps(x['result'].overview)}\n - {[c.text for c in x['result'].details]}\n - {[(k, [c.text for c in v]) for k, v in x['result'].cross_references.items()]}"
                    for x in results]
            else:
                str_res = json.dumps(results)
            return str_res
        return results

    def _get_target_memories(self, memory_names: str | list[str] | None) -> list[tuple[str, KnowledgeBase]]:
        """Get target memories for query"""
        if not memory_names:
            return list(self.memories.items())

        names = [memory_names] if isinstance(memory_names, str) else memory_names

        targets = []
        for name in names:
            sanitized = self._sanitize_name(name)
            if kb := self.memories.get(sanitized):
                targets.append((sanitized, kb))
        return targets

    def list_memories(self) -> list[str]:
        """List all available memories"""
        return list(self.memories.keys())

    async def delete_memory(self, name: str) -> bool:
        """Delete a memory store; returns False when the store does not exist."""
        sanitized = self._sanitize_name(name)
        if sanitized in self.memories:
            del self.memories[sanitized]
            return True
        return False

    def save_memory(self, name: str, path: str) -> bool | bytes:
        """Save a memory store to disk; returns KnowledgeBase.save's result or False."""
        sanitized = self._sanitize_name(name)
        if kb := self.memories.get(sanitized):
            try:
                return kb.save(path)
            except Exception as e:
                print(f"Error saving memory: {str(e)}")
                return False
        return False

    def save_all_memories(self, path: str) -> bool:
        """Save all memory stores to disk as <name>.pkl; False on first failure."""
        for name, kb in self.memories.items():
            try:
                kb.save(os.path.join(path, f"{name}.pkl"))
            except Exception as e:
                print(f"Error saving memory: {str(e)}")
                return False
        return True

    def load_all_memories(self, path: str) -> bool:
        """Load every *.pkl store found in *path*; False on first failure."""
        for file in os.listdir(path):
            if file.endswith(".pkl"):
                try:
                    self.memories[file[:-4]] = KnowledgeBase.load(os.path.join(path, file))
                except EOFError:
                    return False
                except FileNotFoundError:
                    return False
                except Exception as e:
                    print(f"Error loading memory: {str(e)}")
                    return False
        return True

    def load_memory(self, name: str, path: str | bytes) -> bool:
        """Load a memory store from disk; no-op (False) if the name is taken."""
        sanitized = self._sanitize_name(name)
        if sanitized in self.memories:
            return False
        try:
            self.memories[sanitized] = KnowledgeBase.load(path)
            return True
        except Exception:
            return False
__init__(base_path='/semantic_memory', default_model=os.getenv('BLITZMODEL'), default_embedding_model=os.getenv('DEFAULTMODELEMBEDDING'), default_similarity_threshold=0.61, default_batch_size=64, default_n_clusters=2, default_deduplication_threshold=0.85)

Initialize AISemanticMemory with KnowledgeBase integration

Parameters:

Name Type Description Default
base_path str

Root directory for memory storage

'/semantic_memory'
default_model str

Default model for text generation

getenv('BLITZMODEL')
default_embedding_model str

Default embedding model

getenv('DEFAULTMODELEMBEDDING')
default_similarity_threshold float

Default similarity threshold for retrieval

0.61
default_batch_size int

Default batch size for processing

64
default_n_clusters int

Default number of clusters for FAISS

2
default_deduplication_threshold float

Default threshold for deduplication

0.85
Source code in toolboxv2/mods/isaa/base/AgentUtils.py
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
def __init__(self,
             base_path: str = "/semantic_memory",
             default_model: str = os.getenv("BLITZMODEL"),
             default_embedding_model: str = os.getenv("DEFAULTMODELEMBEDDING"),
             default_similarity_threshold: float = 0.61,
             default_batch_size: int = 64,
             default_n_clusters: int = 2,
             default_deduplication_threshold: float = 0.85):
    """
    Initialize AISemanticMemory with KnowledgeBase integration

    Args:
        base_path: Root directory for memory storage
        default_model: Default model for text generation
        default_embedding_model: Default embedding model
        default_similarity_threshold: Default similarity threshold for retrieval
        default_batch_size: Default batch size for processing
        default_n_clusters: Default number of clusters for FAISS
        default_deduplication_threshold: Default threshold for deduplication
    """
    self.base_path = os.path.join(os.getcwd(), ".data", base_path)
    self.memories: dict[str, KnowledgeBase] = {}

    # Map of embedding models to their dimensions
    self.embedding_dims = {
        "text-embedding-3-small": 1536,
        "text-embedding-3-large": 3072,
        "nomic-embed-text": 768,
        "default": 768
    }

    self.default_config = {
        "embedding_model": default_embedding_model,
        "embedding_dim": self._get_embedding_dim(default_embedding_model),
        "similarity_threshold": default_similarity_threshold,
        "batch_size": default_batch_size,
        "n_clusters": default_n_clusters,
        "deduplication_threshold": default_deduplication_threshold,
        "model_name": default_model
    }
add_data(memory_name, data, metadata=None, direct=False) async

Add data to memory store

Parameters:

Name Type Description Default
memory_name str

Target memory store

required
data str | list[str] | bytes | dict

Text, list of texts, binary file, or structured data

required
metadata dict | None

Optional metadata

None
Source code in toolboxv2/mods/isaa/base/AgentUtils.py
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
async def add_data(self,
                   memory_name: str,
                   data: str | list[str] | bytes | dict,
                   metadata: dict | None = None, direct=False) -> bool:
    """
    Add data to a memory store, creating the store if it does not exist.

    Args:
        memory_name: Target memory store name (sanitized before lookup).
        data: Text, list of texts, binary file content, or structured data.
        metadata: Optional metadata; for bytes input, ``metadata["filename"]``
            helps the text extractor detect the file type.
        direct: Passed through to ``KnowledgeBase.add_data``.

    Returns:
        True if at least one new (non-duplicate) entry was added.

    Raises:
        ValueError: If binary text extraction fails or the data type is unsupported.
        NotImplementedError: For dict input (custom KG insertion unsupported).
        RuntimeError: If the underlying KnowledgeBase insertion fails.
    """
    name = self._sanitize_name(memory_name)
    kb = self.memories.get(name)
    if not kb:
        kb = self.create_memory(name)

    # Normalize every supported input shape into a list of tab-free strings.
    texts = []
    if isinstance(data, bytes):
        try:
            text = extract_text_natively(data, filename="" if metadata is None else metadata.get("filename", ""))
            texts = [text.replace('\\t', '').replace('\t', '')]
        except Exception as e:
            # Chain the original error so the extraction failure stays visible.
            raise ValueError(f"File processing failed: {str(e)}") from e
    elif isinstance(data, str):
        texts = [data.replace('\\t', '').replace('\t', '')]
    elif isinstance(data, list):
        texts = [d.replace('\\t', '').replace('\t', '') for d in data]
    elif isinstance(data, dict):
        # Custom KG not supported in current KnowledgeBase
        raise NotImplementedError("Custom knowledge graph insertion not supported")
    else:
        raise ValueError("Unsupported data type")

    # Add data to KnowledgeBase
    try:
        added, duplicates = await kb.add_data(texts, metadata, direct=direct)
        return added > 0
    except Exception as e:
        import traceback
        print(traceback.format_exc())
        # Chain so callers can inspect the original KnowledgeBase error.
        raise RuntimeError(f"Data addition failed: {str(e)}") from e
create_memory(name, model_config=None, storage_config=None)

Create new memory store with KnowledgeBase

Parameters:

Name Type Description Default
name str

Unique name for the memory store

required
model_config dict | None

Configuration for embedding model

None
storage_config dict | None

Configuration for KnowledgeBase parameters

None
Source code in toolboxv2/mods/isaa/base/AgentUtils.py
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
def create_memory(self,
                  name: str,
                  model_config: dict | None = None,
                  storage_config: dict | None = None) -> KnowledgeBase:
    """
    Create new memory store with KnowledgeBase

    Args:
        name: Unique name for the memory store
        model_config: Configuration for embedding model
        storage_config: Configuration for KnowledgeBase parameters
    """
    sanitized = self._sanitize_name(name)
    if sanitized in self.memories:
        raise ValueError(f"Memory '{name}' already exists")

    # Determine embedding model and dimension
    embedding_model = self.default_config["embedding_model"]
    model_name = self.default_config["model_name"]
    if model_config:
        embedding_model = model_config.get("embedding_model", embedding_model)
        model_name = model_config.get("model_name", model_name)
    embedding_dim = self._get_embedding_dim(embedding_model)

    # Get KnowledgeBase parameters
    kb_params = {
        "embedding_dim": embedding_dim,
        "embedding_model": embedding_model,
        "similarity_threshold": self.default_config["similarity_threshold"],
        "batch_size": self.default_config["batch_size"],
        "n_clusters": self.default_config["n_clusters"],
        "deduplication_threshold": self.default_config["deduplication_threshold"],
        "model_name": model_name,
    }

    if storage_config:
        kb_params.update({
            "similarity_threshold": storage_config.get("similarity_threshold", kb_params["similarity_threshold"]),
            "batch_size": storage_config.get("batch_size", kb_params["batch_size"]),
            "n_clusters": storage_config.get("n_clusters", kb_params["n_clusters"]),
            "model_name": storage_config.get("model_name", kb_params["model_name"]),
            "embedding_model": storage_config.get("embedding_model", kb_params["embedding_model"]),
            "deduplication_threshold": storage_config.get("deduplication_threshold",
                                                          kb_params["deduplication_threshold"]),
        })

    # Create KnowledgeBase instance
    self.memories[sanitized] = KnowledgeBase(**kb_params)
    return self.memories[sanitized]
delete_memory(name) async

Delete a memory store

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
790
791
792
793
794
795
796
async def delete_memory(self, name: str) -> bool:
    """Remove a memory store by name; return True if it existed."""
    key = self._sanitize_name(name)
    existed = key in self.memories
    if existed:
        self.memories.pop(key)
    return existed
list_memories()

List all available memories

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
786
787
788
def list_memories(self) -> list[str]:
    """Return the names of all registered memory stores."""
    return [*self.memories]
load_all_memories(path)

Load all memory stores from disk

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
819
820
821
822
823
824
825
826
827
828
829
830
831
832
def load_all_memories(self, path: str) -> bool:
    """Load every ``*.pkl`` memory store found in *path*.

    Stops at the first failure and returns False (stores loaded before the
    failure remain registered); returns True when all files loaded.

    Args:
        path: Directory to scan for ``.pkl`` files.
    """
    for file in os.listdir(path):
        if not file.endswith(".pkl"):
            continue
        try:
            # Store key is the filename without the ".pkl" suffix.
            self.memories[file[:-4]] = KnowledgeBase.load(os.path.join(path, file))
        except (EOFError, FileNotFoundError):
            # Truncated or vanished file: abort silently, matching the
            # original's two identical except branches (now merged).
            return False
        except Exception as e:
            print(f"Error loading memory: {str(e)}")
            return False
    return True
load_memory(name, path)

Load a memory store from disk

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
834
835
836
837
838
839
840
841
842
843
844
def load_memory(self, name: str, path: str | bytes) -> bool:
    """Load a memory store from disk"""
    sanitized = self._sanitize_name(name)
    if sanitized in self.memories:
        return False
    try:
        self.memories[sanitized] = KnowledgeBase.load(path)
        return True
    except Exception:
        # print(f"Error loading memory: {str(e)}")
        return False
query(query, memory_names=None, query_params=None, to_str=False, unified_retrieve=False) async

Query memories using KnowledgeBase retrieval

Parameters:

Name Type Description Default
query str

Search query

required
memory_names str | list[str] | None

Target memory names

None
query_params dict | None

Query parameters

None
to_str bool

Return string format

False
unified_retrieve bool

Unified retrieve

False
Source code in toolboxv2/mods/isaa/base/AgentUtils.py
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
async def query(self,
                query: str,
                memory_names: str | list[str] | None = None,
                query_params: dict | None = None,
                to_str: bool = False,
                unified_retrieve: bool = False) -> str | list[dict]:
    """
    Query one or more memories via KnowledgeBase retrieval.

    Args:
        query: Search query text.
        memory_names: Target memory name(s); None targets the default set
            resolved by ``_get_target_memories``.
        query_params: Optional retrieval overrides (``k``, ``min_similarity``,
            ``cross_ref_depth``, ``max_cross_refs``, ``max_sentences``).
        to_str: Serialize the results to a string representation.
        unified_retrieve: Use ``unified_retrieve`` instead of
            ``retrieve_with_overview`` (note the different defaults below).

    Returns:
        A list of ``{"memory": name, "result": ...}`` dicts, or their
        string rendering when ``to_str`` is True; ``[]`` if no targets.
    """
    targets = self._get_target_memories(memory_names)
    if not targets:
        return []

    # Normalize once instead of repeating `x if query_params else d` per key.
    qp = query_params or {}
    results = []
    for name, kb in targets:
        if unified_retrieve:
            result = await kb.unified_retrieve(
                query=query,
                k=qp.get("k", 2),
                min_similarity=qp.get("min_similarity", 0.2),
                cross_ref_depth=qp.get("cross_ref_depth", 2),
                max_cross_refs=qp.get("max_cross_refs", 6),
                max_sentences=qp.get("max_sentences", 12)
            )
        else:
            result = await kb.retrieve_with_overview(
                query=query,
                k=qp.get("k", 3),
                min_similarity=qp.get("min_similarity", 0.2),
                cross_ref_depth=qp.get("cross_ref_depth", 2),
                max_cross_refs=qp.get("max_cross_refs", 2),
                max_sentences=qp.get("max_sentences", 5)
            )
        results.append({
            "memory": name,
            "result": result
        })

    if to_str:
        if unified_retrieve:
            # unified_retrieve results are plain JSON-serializable dicts.
            return json.dumps(results)
        # retrieve_with_overview returns rich objects; render them manually.
        return [
            f"{x['memory']} - {json.dumps(x['result'].overview)}\n - {[c.text for c in x['result'].details]}\n - {[(k, [c.text for c in v]) for k, v in x['result'].cross_references.items()]}"
            for x in results]
    return results
save_all_memories(path)

Save all memory stores to disk

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
809
810
811
812
813
814
815
816
817
def save_all_memories(self, path: str) -> bool:
    """Persist every memory store as ``<name>.pkl`` under *path*.

    Returns False on the first save failure, True when all stores saved.
    """
    for name, kb in self.memories.items():
        target = os.path.join(path, f"{name}.pkl")
        try:
            kb.save(target)
        except Exception as e:
            print(f"Error saving memory: {str(e)}")
            return False
    return True
save_memory(name, path)

Save a memory store to disk

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
798
799
800
801
802
803
804
805
806
807
def save_memory(self, name: str, path: str) -> bool | bytes:
    """Save a memory store to disk"""
    sanitized = self._sanitize_name(name)
    if kb := self.memories.get(sanitized):
        try:
            return kb.save(path)
        except Exception as e:
            print(f"Error saving memory: {str(e)}")
            return False
    return False
PyEnvEval
Source code in toolboxv2/mods/isaa/base/AgentUtils.py
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
class PyEnvEval:
    """Stateful Python evaluator whose environment persists between calls.

    SECURITY NOTE: this executes arbitrary code via exec/eval — only ever
    feed it trusted input (it is exposed as an agent tool).
    """

    def __init__(self):
        self.local_env = locals().copy()
        self.global_env = {'local_env': self.local_env}  # globals().copy()

    def eval_code(self, code):
        """Run *code* exactly once and format its result.

        FIX: the previous implementation ran the code twice (``exec`` then
        ``eval`` of the same string), duplicating side effects, and always
        returned a SyntaxError message for statements. Now the code is
        compiled once: expressions are evaluated for their value, statements
        are executed and yield None.
        """
        try:
            try:
                compiled = compile(code, '<string>', 'eval')
            except SyntaxError:
                # Not a single expression: execute as statements (no value).
                exec(compile(code, '<string>', 'exec'), self.global_env, self.local_env)
                result = None
            else:
                result = eval(compiled, self.global_env, self.local_env)
            return self.format_output(result)
        except Exception as e:
            return self.format_output(str(e))

    def get_env(self):
        """Render the current local environment as text."""
        local_env_str = self.format_env(self.local_env)
        return f'Locals:\n{local_env_str}'

    @staticmethod
    def format_output(output):
        # 'Ergebnis' (German for "result") is part of the tool's output contract.
        return f'Ergebnis: {output}'

    @staticmethod
    def format_env(env):
        """Render an environment dict as 'key: value' lines."""
        return '\n'.join(f'{key}: {value}' for key, value in env.items())

    def run_and_display(self, python_code):
        """function to eval python code"""
        start = f'Start-state:\n{self.get_env()}'
        result = self.eval_code(python_code)
        end = f'End-state:\n{self.get_env()}'
        return f'{start}\nResult:\n{result}\n{end}'

    def tool(self):
        """Expose run_and_display in the agent tool-registry format."""
        return {"PythonEval": {"func": self.run_and_display, "description": "Use Python Code to Get to an Persis Answer! input must be valid python code all non code parts must be comments!"}}
run_and_display(python_code)

function to eval python code

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
1045
1046
1047
1048
1049
1050
def run_and_display(self, python_code):
    """Evaluate python code and report the environment state before and after."""
    before = f'Start-state:\n{self.get_env()}'
    outcome = self.eval_code(python_code)
    after = f'End-state:\n{self.get_env()}'
    return '\n'.join((before, 'Result:', outcome, after))
Scripts
Source code in toolboxv2/mods/isaa/base/AgentUtils.py
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
class Scripts:
    """Registry of user-defined Python/shell scripts with pickle persistence.

    Each script is a dict of description/content/type/dependencies. Execution
    writes the content to a temp file and runs it, preferring the async path.
    Python scripts can declare dependencies, resolved via ``uv run --with``.
    """

    def __init__(self, filename):
        # name -> {"description", "content", "type", "dependencies"}
        self.scripts = {}
        self.filename = filename
        # Shared scratch directory for temporary script files.
        self.temp_dir = Path(tempfile.gettempdir()) / "agent_scripts"
        self.temp_dir.mkdir(exist_ok=True)

    def create_script(self, name: str, description: str, content: str, script_type: str = "py", dependencies: str = ""):
        """Create a script with optional dependencies"""
        if not name.replace('_', '').replace('-', '').isalnum():
            return "Error: Script name must be alphanumeric (with _ or - allowed)"

        self.scripts[name] = {
            "description": description,
            "content": content,
            "type": script_type.lower(),
            "dependencies": dependencies.strip()
        }
        return f"✅ Script '{name}' created! Your capabilities have been extended."

    def remove_script(self, name: str):
        """Remove a script"""
        if name in self.scripts:
            del self.scripts[name]
            return f"✅ Script '{name}' removed!"
        return f"❌ Script '{name}' not found!"

    def run_script(self, name: str, args: str = ""):
        """Run a script with optional arguments - async-safe wrapper"""
        try:
            # Get the current event loop
            loop = asyncio.get_event_loop()
            # Run the async version
            return loop.run_until_complete(self._run_script_async(name, args))
        except RuntimeError:
            # If no event loop (or it is already running), run synchronously
            return self._run_script_sync(name, args)

    async def _run_script_async(self, name: str, args: str = ""):
        """Async version of run_script"""
        if name not in self.scripts:
            return f"❌ Script '{name}' not found! Use listScripts to see available scripts."

        script = self.scripts[name]
        script_type = script["type"]

        # Create temporary script file (PID-suffixed to avoid collisions)
        temp_script = self.temp_dir / f"{name}_{os.getpid()}.{script_type}"

        try:
            with open(temp_script, "w", encoding="utf-8") as f:
                f.write(script["content"])

            # Parse arguments safely
            script_args = args.split() if args.strip() else []

            if script_type == "py":
                return await self._run_python_script_async(temp_script, script_args, script.get("dependencies", ""))
            elif script_type in ["sh", "bash"]:
                return await self._run_shell_script_async(temp_script, script_args)
            else:
                return f"❌ Unsupported script type: {script_type}. Use 'py' or 'sh'"

        except Exception as e:
            return f"❌ Error running script: {str(e)}"
        finally:
            # Always clean up the temp file, even on failure.
            if temp_script.exists():
                temp_script.unlink()

    def _run_script_sync(self, name: str, args: str = ""):
        """Synchronous fallback version"""
        if name not in self.scripts:
            return f"❌ Script '{name}' not found! Use listScripts to see available scripts."

        script = self.scripts[name]
        script_type = script["type"]

        # Create temporary script file
        temp_script = self.temp_dir / f"{name}_{os.getpid()}.{script_type}"

        try:
            with open(temp_script, "w", encoding="utf-8") as f:
                f.write(script["content"])

            # Parse arguments safely
            script_args = args.split() if args.strip() else []

            if script_type == "py":
                return self._run_python_script_sync(temp_script, script_args, script.get("dependencies", ""))
            elif script_type in ["sh", "bash"]:
                return self._run_shell_script_sync(temp_script, script_args)
            else:
                return f"❌ Unsupported script type: {script_type}. Use 'py' or 'sh'"

        except Exception as e:
            return f"❌ Error running script: {str(e)}"
        finally:
            if temp_script.exists():
                temp_script.unlink()

    async def _run_python_script_async(self, script_path: Path, args: list, dependencies: str):
        """Run Python script async with uv dependency management"""
        cmd = []

        if dependencies.strip():
            if shutil.which("uv"):
                # Whitespace/newline-separated deps -> one --with flag each.
                dep_list = [dep.strip() for dep in dependencies.replace('\n', ' ').split() if dep.strip()]
                cmd = ["uv", "run"]
                for dep in dep_list:
                    cmd.extend(["--with", dep])
                cmd.extend([sys.executable, str(script_path)] + args)
            else:
                return "❌ uv not found. Install uv for dependency management: `pip install uv`"
        else:
            cmd = [sys.executable, str(script_path)] + args

        return await self._execute_command_async(cmd)

    async def _run_shell_script_async(self, script_path: Path, args: list):
        """Run shell script async cross-platform"""
        if platform.system() == "Windows":
            cmd = ["cmd", "/c", str(script_path)] + args
        else:
            cmd = ["sh", str(script_path)] + args

        return await self._execute_command_async(cmd)

    def _run_python_script_sync(self, script_path: Path, args: list, dependencies: str):
        """Run Python script sync with uv dependency management"""

        cmd = []

        if dependencies.strip():
            if shutil.which("uv"):
                dep_list = [dep.strip() for dep in dependencies.replace('\n', ' ').split() if dep.strip()]
                cmd = ["uv", "run"]
                for dep in dep_list:
                    cmd.extend(["--with", dep])
                cmd.extend([sys.executable, str(script_path)] + args)
            else:
                return "❌ uv not found. Install uv for dependency management: `pip install uv`"
        else:
            cmd = [sys.executable, str(script_path)] + args

        return self._execute_command_sync(cmd)

    def _run_shell_script_sync(self, script_path: Path, args: list):
        """Run shell script sync cross-platform"""
        if platform.system() == "Windows":
            cmd = ["cmd", "/c", str(script_path)] + args
        else:
            cmd = ["sh", str(script_path)] + args

        return self._execute_command_sync(cmd)

    async def _execute_command_async(self, cmd: list, timeout: int = 60):
        """Execute command async safely with timeout"""
        try:
            process = await asyncio.create_subprocess_exec(
                *cmd,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
                cwd=str(self.temp_dir),
                text=False,
            )

            try:
                stdout, stderr = await asyncio.wait_for(
                    process.communicate(),
                    timeout=timeout
                )
            except (TimeoutError, asyncio.TimeoutError):
                # FIX: on Python < 3.11 asyncio.wait_for raises
                # asyncio.TimeoutError, which is NOT the builtin TimeoutError,
                # so the original single-type clause let timeouts escape.
                process.kill()
                await process.wait()
                return f"⏱️ Script timed out after {timeout} seconds"
            output = remove_styles(safe_decode(stdout))
            if stderr:
                error_msg = remove_styles(safe_decode(stderr))
                output += f"\n🔴 STDERR: {error_msg}"

            if process.returncode != 0:
                output += f"\n⚠️  Exit code: {process.returncode}"

            return output.strip() if output.strip() else "✅ Script completed (no output)"

        except Exception as e:
            return f"❌ Execution error: {str(e)}"

    def _execute_command_sync(self, cmd: list, timeout: int = 60):
        """Execute command sync safely with timeout"""

        try:
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=False,
                timeout=timeout,
                cwd=str(self.temp_dir)
            )

            output = remove_styles(safe_decode(result.stdout))
            if result.stderr:
                output += f"\n🔴 STDERR: {remove_styles(safe_decode(result.stderr))}"

            if result.returncode != 0:
                output += f"\n⚠️  Exit code: {result.returncode}"

            return output.strip() if output.strip() else "✅ Script completed (no output)"

        except subprocess.TimeoutExpired:
            return f"⏱️ Script timed out after {timeout} seconds"
        except Exception as e:
            return f"❌ Execution error: {str(e)}"

    def get_scripts_list(self):
        """Get formatted list of all scripts"""
        if not self.scripts:
            return "📝 No scripts available. Create scripts to extend your capabilities!"

        result = ["🔧 Available Enhanced Capabilities:"]
        for name, script in self.scripts.items():
            deps = f" [deps: {script['dependencies']}]" if script.get('dependencies') else ""
            result.append(f"  • {name} ({script['type']}){deps}: {script['description']}")

        return "\n".join(result)

    def save_scripts(self):
        """Save scripts to persistent storage"""
        try:
            os.makedirs(os.path.dirname(self.filename) if os.path.dirname(self.filename) else ".", exist_ok=True)
            with open(f"{self.filename}.pkl", "wb") as f:
                pickle.dump(self.scripts, f)
            return "💾 Scripts saved successfully!"
        except Exception as e:
            return f"❌ Save error: {str(e)}"

    def load_scripts(self):
        """Load scripts from persistent storage"""
        try:
            if os.path.exists(f"{self.filename}.pkl"):
                with open(f"{self.filename}.pkl", "rb") as f:
                    data = f.read()
                if data:
                    # NOTE(review): pickle of a local file — trusted input assumed.
                    self.scripts = pickle.loads(data)
            else:
                os.makedirs(os.path.dirname(self.filename) if os.path.dirname(self.filename) else ".", exist_ok=True)
                open(f"{self.filename}.pkl", "a").close()
        except Exception as e:
            print(f"Load error: {str(e)}")
create_script(name, description, content, script_type='py', dependencies='')

Create a script with optional dependencies

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
137
138
139
140
141
142
143
144
145
146
147
148
def create_script(self, name: str, description: str, content: str, script_type: str = "py", dependencies: str = ""):
    """Register a new script definition, extending the agent's capabilities."""
    # Only alphanumeric names (underscores and hyphens allowed) are accepted.
    stripped = name.replace('_', '').replace('-', '')
    if not stripped.isalnum():
        return "Error: Script name must be alphanumeric (with _ or - allowed)"

    entry = {
        "description": description,
        "content": content,
        "type": script_type.lower(),
        "dependencies": dependencies.strip(),
    }
    self.scripts[name] = entry
    return f"✅ Script '{name}' created! Your capabilities have been extended."
get_scripts_list()

Get formatted list of all scripts

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
344
345
346
347
348
349
350
351
352
353
354
def get_scripts_list(self):
    """Render all registered scripts as a human-readable capability list."""
    if not self.scripts:
        return "📝 No scripts available. Create scripts to extend your capabilities!"

    header = "🔧 Available Enhanced Capabilities:"
    rows = [
        f"  • {name} ({entry['type']})"
        + (f" [deps: {entry['dependencies']}]" if entry.get('dependencies') else "")
        + f": {entry['description']}"
        for name, entry in self.scripts.items()
    ]
    return "\n".join([header, *rows])
load_scripts()

Load scripts from persistent storage

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
366
367
368
369
370
371
372
373
374
375
376
377
378
def load_scripts(self):
    """Restore scripts from the pickle file, creating an empty one if absent."""
    pkl_path = f"{self.filename}.pkl"
    try:
        if os.path.exists(pkl_path):
            with open(pkl_path, "rb") as fh:
                raw = fh.read()
            if raw:
                # NOTE(review): pickle of a local file — trusted input assumed.
                self.scripts = pickle.loads(raw)
        else:
            parent = os.path.dirname(self.filename)
            os.makedirs(parent if parent else ".", exist_ok=True)
            # Touch an empty file so later saves/loads have a target.
            open(pkl_path, "a").close()
    except Exception as e:
        print(f"Load error: {str(e)}")
remove_script(name)

Remove a script

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
150
151
152
153
154
155
def remove_script(self, name: str):
    """Delete a registered script by name, reporting success or absence."""
    if name not in self.scripts:
        return f"❌ Script '{name}' not found!"
    self.scripts.pop(name)
    return f"✅ Script '{name}' removed!"
run_script(name, args='')

Run a script with optional arguments - async-safe wrapper

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
157
158
159
160
161
162
163
164
165
166
def run_script(self, name: str, args: str = ""):
    """Run a script by name; prefers the async path, falls back to sync."""
    try:
        # run_until_complete raises RuntimeError when a loop is already
        # running (or none can be obtained) — then use the sync fallback.
        loop = asyncio.get_event_loop()
        return loop.run_until_complete(self._run_script_async(name, args))
    except RuntimeError:
        return self._run_script_sync(name, args)
save_scripts()

Save scripts to persistent storage

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
356
357
358
359
360
361
362
363
364
def save_scripts(self):
    """Pickle the current script registry to ``<filename>.pkl``."""
    try:
        parent = os.path.dirname(self.filename)
        os.makedirs(parent if parent else ".", exist_ok=True)
        with open(f"{self.filename}.pkl", "wb") as fh:
            pickle.dump(self.scripts, fh)
    except Exception as e:
        return f"❌ Save error: {str(e)}"
    return "💾 Scripts saved successfully!"
anything_from_str_to_dict(data, expected_keys=None, mini_task=lambda x: '')

Attempts to convert a string into one or more dictionaries, taking the expected keys and their default values into account.

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
def anything_from_str_to_dict(data: str, expected_keys: dict = None, mini_task=lambda x: ''):
    """
    Attempt to convert a string into one or more dictionaries.

    Takes the expected keys and their default values into account: every
    returned dict is back-filled with any missing expected keys.

    Args:
        data: Raw (possibly malformed) JSON-ish text.
        expected_keys: Mapping of required keys to their default values.
        mini_task: Callable (presumably an LLM call — see complete_json_object)
            used to repair broken JSON when direct parsing fails.

    Returns:
        A list of dicts (empty when *data* is shorter than minimal JSON).
    """
    if len(data) < 4:
        return []

    if expected_keys is None:
        expected_keys = {}

    result = []
    json_objects = find_json_objects_in_str(data)
    # SECURITY: eval() on arbitrary text is dangerous — acceptable only if
    # *data* comes from a trusted model/tool; never feed raw user input here.
    if not json_objects and data.startswith('[') and data.endswith(']'):
        json_objects = eval(data)
    if json_objects and len(json_objects) > 0 and isinstance(json_objects[0], dict):
        # Merge parsed objects over the defaults (parsed values win).
        result.extend([{**expected_keys, **ob} for ob in json_objects])
    if not result:
        completed_object = complete_json_object(data, mini_task)
        if completed_object is not None:
            result.append(completed_object)
    # Last resort: wrap the raw text under the first expected key.
    if len(result) == 0 and expected_keys:
        result = [{list(expected_keys.keys())[0]: data}]
    for res in result:
        # NOTE(review): rebinding `res` only unwraps for the key-fill below;
        # the list stored inside `result` itself stays wrapped — confirm intent.
        if isinstance(res, list) and len(res) > 0:
            res = res[0]
        for key, value in expected_keys.items():
            if key not in res:
                res[key] = value

    if len(result) == 0:
        fixed = fix_json(data)
        if fixed:
            result.append(fixed)

    return result
complete_json_object(data, mini_task)

Calls a function to bring a string into the correct format. Returns the resulting JSON object if the function succeeds, otherwise None.

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
def complete_json_object(data: str, mini_task):
    """Ask *mini_task* to repair *data* into valid JSON.

    Returns the parsed result on success, otherwise None.
    """
    # The German repair prompt is part of the runtime contract — kept verbatim.
    prompt = f"Vervollständige das Json Object. Und bringe den string in das Richtige format. data={data}\nJson="
    repaired = mini_task(prompt)
    if not repaired:
        return None
    return anything_from_str_to_dict(repaired)
detect_shell()

Detects the best available shell and the argument to execute a command. Returns: A tuple of (shell_executable, command_argument). e.g., ('/bin/bash', '-c') or ('powershell.exe', '-Command')

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
def detect_shell() -> tuple[str, str]:
    """
    Detect the best available shell and its command-execution flag.

    Returns:
        (shell_executable, command_argument),
        e.g. ('/bin/bash', '-c') or ('powershell.exe', '-Command').
    """
    if platform.system() == "Windows":
        # Prefer PowerShell Core, then Windows PowerShell, then cmd.
        for candidate in ("pwsh", "powershell"):
            found = shutil.which(candidate)
            if found:
                return found, "-Command"
        return "cmd.exe", "/c"

    # POSIX: honor $SHELL when it points at a real executable.
    preferred = os.environ.get("SHELL")
    if preferred and shutil.which(preferred):
        return preferred, "-c"

    for candidate in ("bash", "zsh", "sh"):
        found = shutil.which(candidate)
        if found:
            return found, "-c"

    # Nothing found on PATH: fall back to the POSIX-mandated location.
    return "/bin/sh", "-c"
extract_text_natively(data, filename='')

Extracts text from various file types using native Python methods or pure-Python libraries (specifically PyPDF2 for PDFs).

Parameters:

Name Type Description Default
data bytes

Der Inhalt der Datei als Bytes.

required
filename str

Der Originaldateiname, um den Typ zu bestimmen.

''

Returns:

Name Type Description
str str

Der extrahierte Text.

Raises:

Type Description
ValueError

Wenn der Dateityp nicht unterstützt wird oder die Verarbeitung fehlschlägt.

ImportError

Wenn PyPDF2 für die Verarbeitung von PDF-Dateien benötigt, aber nicht installiert ist.

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
def extract_text_natively(data: bytes, filename: str = "") -> str:
    """
    Extract text from several file types using native Python methods or
    pure-Python libraries (PyPDF2 for PDFs).

    Supported inputs: DOCX (detected via the ZIP signature), PDF (detected
    via the ``%PDF-`` signature), and plain text (UTF-8 with a Latin-1
    fallback).

    Args:
        data (bytes): The file content as bytes.
        filename (str, optional): The original file name. Currently unused;
            kept for API compatibility and future type detection.

    Returns:
        str: The extracted text.

    Raises:
        ValueError: If the file type is unsupported or processing fails.
        ImportError: If PyPDF2 is required for PDF handling but not installed.
    """
    # 1. DOCX handling (native, via zipfile + ElementTree)
    if data.startswith(b'PK\x03\x04'):
        try:
            text_parts = []
            with zipfile.ZipFile(io.BytesIO(data)) as zf:
                namespace = "{http://schemas.openxmlformats.org/wordprocessingml/2006/main}"
                body_path = "word/document.xml"
                if body_path in zf.namelist():
                    tree = ET.fromstring(zf.read(body_path))
                    for para in tree.iter(f"{namespace}p"):
                        texts_in_para = [node.text for node in para.iter(f"{namespace}t") if node.text]
                        if texts_in_para:
                            text_parts.append("".join(texts_in_para))
                    # Only return when the archive really is a DOCX; other
                    # ZIP files fall through to the plain-text fallback
                    # (previously they silently returned an empty string).
                    return "\n".join(text_parts)
        except (zipfile.BadZipFile, ET.ParseError):
            pass  # A ZIP container, but not a valid DOCX -- keep going.

    # 2. PDF handling (PyPDF2)
    if data.startswith(b'%PDF-'):
        if PyPDF2 is None:
            raise ImportError(
                "Die Bibliothek 'PyPDF2' wird benötigt, um PDF-Dateien zu verarbeiten. Bitte installieren Sie sie mit 'pip install PyPDF2'.")

        try:
            pdf_file = io.BytesIO(data)
            text_parts = []
            if hasattr(PyPDF2, "PdfReader"):
                # Modern API (PyPDF2 >= 2.0); the only API left in 3.x.
                reader = PyPDF2.PdfReader(pdf_file)
                for page in reader.pages:
                    page_text = page.extract_text()
                    if page_text:
                        text_parts.append(page_text)
            else:
                # Legacy 1.x API (PdfFileReader/getPage/extractText),
                # removed in PyPDF2 3.0 -- kept for old installations.
                reader = PyPDF2.PdfFileReader(pdf_file)
                for page_num in range(reader.numPages):
                    page_text = reader.getPage(page_num).extractText()
                    if page_text:
                        text_parts.append(page_text)
            return "\n".join(text_parts)
        except Exception as e:
            raise ValueError(f"PDF-Verarbeitung mit PyPDF2 fehlgeschlagen: {e}")

    # 3. Plain-text fallback (TXT)
    try:
        return data.decode('utf-8')
    except UnicodeDecodeError:
        # Latin-1 maps every byte value, so this only fails on exotic errors.
        try:
            return data.decode('latin-1')
        except Exception as e:
            raise ValueError(f"Text-Dekodierung fehlgeschlagen: {e}")
find_json_objects_in_str(data)

Sucht nach JSON-Objekten innerhalb eines Strings. Gibt eine Liste von JSON-Objekten zurück, die im String gefunden wurden.

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
1478
1479
1480
1481
1482
1483
1484
1485
1486
def find_json_objects_in_str(data: str):
    """
    Search a string for embedded JSON objects.

    Args:
        data: Arbitrary text that may contain JSON objects.

    Returns:
        A list of successfully parsed JSON objects found in the string;
        candidates that cannot be parsed (after repair attempts) are dropped.
    """
    json_objects = extract_json_objects(data)
    if not isinstance(json_objects, list):
        json_objects = [json_objects]
    # Parse each candidate exactly once (the previous version called
    # get_json_from_json_str twice per object: once for the filter and
    # once for the result).
    parsed = (get_json_from_json_str(ob, 10) for ob in json_objects)
    return [obj for obj in parsed if obj is not None]
get_json_from_json_str(json_str, repeat=1)

Versucht, einen JSON-String in ein Python-Objekt umzuwandeln.

Wenn beim Parsen ein Fehler auftritt, versucht die Funktion, das Problem zu beheben, indem sie das Zeichen an der Position des Fehlers durch ein Escape-Zeichen ersetzt. Dieser Vorgang wird bis zu repeat-mal wiederholt.

Parameters:

Name Type Description Default
json_str str or list or dict

Der JSON-String, der geparst werden soll.

required
repeat int

Die Anzahl der Versuche, das Parsen durchzuführen.

1

Returns:

Type Description
dict or None

Das resultierende Python-Objekt.

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
def get_json_from_json_str(json_str: str or list or dict, repeat: int = 1) -> dict or None:
    """Attempt to convert a JSON string into a Python object.

    If parsing raises an error, the function tries to repair the input by
    escaping the quote characters around the position reported by the
    parser, then retries. This cycle runs up to `repeat` times.

    Args:
        json_str: The JSON string to parse.
        repeat: The number of parse attempts to perform.

    Returns:
        The resulting Python object, or None if no attempt succeeded.
    """
    for _ in range(repeat):
        try:
            return parse_json_with_auto_detection(json_str)
        except json.JSONDecodeError as e:
            # Error offset taken from the "... (char N)" suffix of the
            # decoder message. NOTE(review): assumes that pattern is always
            # present -- an IndexError is possible otherwise; confirm.
            unexp = int(re.findall(r'\(char (\d+)\)', str(e))[0])
            # Escape the quote preceding the error position ...
            unesc = json_str.rfind(r'"', 0, unexp)
            json_str = json_str[:unesc] + r'\"' + json_str[unesc + 1:]
            # ... and the next quote after it (presumed closing quote).
            closg = json_str.find(r'"', unesc + 2)
            json_str = json_str[:closg] + r'\"' + json_str[closg + 1:]
        # Give an external repair helper a chance before the next attempt.
        new = fix_json_object(json_str)
        if new is not None:
            json_str = new
    get_logger().info(f"Unable to parse JSON string after {json_str}")
    return None
parse_json_with_auto_detection(json_data)

Parses JSON data, automatically detecting if a value is a JSON string and parsing it accordingly. If a value cannot be parsed as JSON, it is returned as is.

Source code in toolboxv2/mods/isaa/base/AgentUtils.py
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
def parse_json_with_auto_detection(json_data):
    """
    Recursively parse JSON data, automatically detecting whether a value is
    itself a JSON string and parsing it accordingly.

    Dicts and lists are traversed recursively; any other value is handed to
    ``json.loads`` and returned unchanged when it does not parse.
    """

    def try_parse_json(value):
        """
        Try to parse a value as JSON; return the original value on failure.
        """
        try:
            parsed_value = json.loads(value)
            # A parsed *string* may itself hold a literal (e.g. '"[1, 2]"').
            # SECURITY FIX: use ast.literal_eval instead of eval() -- it
            # accepts the same literal syntax without executing code.
            if isinstance(parsed_value, str):
                import ast
                return ast.literal_eval(parsed_value)
            else:
                return parsed_value
        except Exception:
            # Not JSON (or not a pure literal) -- keep the value as-is.
            return value

    if isinstance(json_data, dict):
        return {key: parse_json_with_auto_detection(value) for key, value in json_data.items()}
    elif isinstance(json_data, list):
        return [parse_json_with_auto_detection(item) for item in json_data]
    else:
        return try_parse_json(json_data)
KnowledgeBase
Chunk dataclass

Represents a chunk of text with its embedding and metadata

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
27
28
29
30
31
32
33
34
@dataclass(slots=True)
class Chunk:
    """Represents a chunk of text with its embedding and metadata."""
    text: str                      # raw chunk text
    embedding: np.ndarray          # embedding vector computed for `text`
    metadata: dict[str, Any]       # per-chunk metadata (source, concepts, ...)
    content_hash: str              # hash of the content, presumably for de-duplication -- TODO confirm
    cluster_id: int | None = None  # id of the cluster this chunk was assigned to, if any
ConceptAnalysis

Bases: BaseModel

Represents the analysis of key concepts.

Attributes:

Name Type Description
key_concepts list[str]

A list of primary key concepts identified.

relationships list[str]

A list of relationships between the identified key concepts.

importance_hierarchy list[str]

A list that represents the hierarchical importance of the key concepts.

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
111
112
113
114
115
116
117
118
119
120
121
122
# NOTE(review): a pydantic model, presumably used as a structured-output
# schema for LLM responses elsewhere in this module -- confirm against callers.
class ConceptAnalysis(BaseModel):
    """
    Represents the analysis of key concepts.

    Attributes:
        key_concepts (list[str]): A list of primary key concepts identified.
        relationships (list[str]): A list of relationships between the identified key concepts.
        importance_hierarchy (list[str]): A list that represents the hierarchical importance of the key concepts.
    """
    key_concepts: list[str]
    relationships: list[str]
    importance_hierarchy: list[str]
ConceptExtractor

Handles extraction of concepts and relationships from text

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
class ConceptExtractor:
    """Handles extraction of concepts and relationships from text.

    Sends texts to an LLM (via ``litellm_complete``) with a JSON-extraction
    prompt and accumulates the resulting concepts in a ``ConceptGraph``.
    """

    def __init__(self, knowledge_base, requests_per_second = 85.):
        # Owning knowledge base; supplies model_name and batch_size.
        self.kb = knowledge_base
        self.concept_graph = ConceptGraph()
        # Target LLM request rate (requests per second) for the rate limiter.
        self.requests_per_second = requests_per_second

    async def extract_concepts(self, texts: list[str], metadatas: list[dict[str, Any]]) -> list[list[Concept]]:
        """
        Extract concepts from texts using concurrent processing with rate limiting.
        Requests are made at the specified rate while responses are processed asynchronously.
        """
        # Ensure metadatas list matches texts length
        # (pads with empty dicts; a longer metadatas list is left as-is,
        # since a negative repeat count produces an empty padding list).
        metadatas = metadatas + [{}] * (len(texts) - len(metadatas))

        # Initialize rate limiter
        rate_limiter = DynamicRateLimiter()

        system_prompt = (
            "Analyze the given text and extract key concepts and their relationships. For each concept:\n"
            "1. Identify the concept name and category (technical, domain, method, property, ...)\n"
            "2. Determine relationships with other concepts (uses, part_of, similar_to, depends_on, ...)\n"
            "3. Assess importance (0-1 score) based on centrality to the text\n"
            "4. Extract relevant context snippets\n"
            "5. Max 5 Concepts!\n"
            "only return in json format!\n"
            """{"concepts": [{
                "name": "concept_name",
                "category": "category_name",
                "relationships": {
                    "relationship_type": ["related_concept1", "related_concept2"]
                },
                "importance_score": 0.0,
                "context_snippets": ["relevant text snippet"]
            }]}\n"""
        )

        # Prepare all requests
        requests = [
            (idx, f"Text to Convert in to JSON structure:\n{text}", system_prompt, metadata)
            for idx, (text, metadata) in enumerate(zip(texts, metadatas, strict=False))
        ]

        async def process_single_request(idx: int, prompt: str, system_prompt: str, metadata: dict[str, Any]):
            """Process a single request with rate limiting"""
            try:
                from toolboxv2.mods.isaa.extras.adapter import litellm_complete
                # Wait for rate limit
                await rate_limiter.acquire()
                # NOTE(review): i__ looks like a module-level counter of
                # issued requests -- confirm its definition.
                i__[1] += 1
                # Make API call without awaiting the response
                response_future = litellm_complete(
                    prompt=prompt,
                    system_prompt=system_prompt,
                    response_format=Concepts,
                    model_name=self.kb.model_name,
                    fallbacks=["groq/gemma2-9b-it"] +
                              [m for m in os.getenv("FALLBACKS_MODELS_PREM", '').split(',') if m]
                )

                return idx, response_future

            except Exception as e:
                print(f"Error initiating request {idx}: {str(e)}")
                return idx, None

        async def process_response(idx: int, response_future) -> list[Concept]:
            """Process the response once it's ready"""
            try:
                if response_future is None:
                    return []

                response = await response_future
                return await self._process_response(response, metadatas[idx])

            except Exception as e:
                print(f"Error processing response {idx}: {str(e)}")
                return []

        # Create tasks for all requests
        request_tasks = []
        batch_size = self.kb.batch_size

        rate_limiter.update_rate(self.requests_per_second)

        for batch_start in range(0, len(requests), batch_size):
            batch = requests[batch_start:batch_start + batch_size]

            # Create tasks for the batch
            batch_tasks = [
                process_single_request(idx, prompt, sys_prompt, meta)
                for idx, prompt, sys_prompt, meta in batch
            ]
            request_tasks.extend(batch_tasks)

        # Execute all requests with rate limiting
        request_results = await asyncio.gather(*request_tasks)

        # Process responses as they complete
        response_tasks = [
            process_response(idx, response_future)
            for idx, response_future in request_results
        ]

        # Gather all results
        all_results = await asyncio.gather(*response_tasks)

        # Sort results by original index
        # (gather preserves input order, so this re-lists results per text).
        sorted_results = [[] for _ in texts]
        for idx, concepts in enumerate(all_results):
            sorted_results[idx] = concepts

        return sorted_results

    async def _process_response(self, response: Any, metadata: dict[str, Any]) -> list[Concept]:
        """Helper method to process a single response and convert it to Concepts"""
        try:
            # Extract content from response: either an OpenAI-style object
            # with .choices, or a plain string.
            if hasattr(response, 'choices'):
                content = response.choices[0].message.content
                if content is None:
                    # Some providers return the JSON via a tool call instead.
                    content = response.choices[0].message.tool_calls[0].function.arguments
                if content is None:
                    return []
            elif isinstance(response, str):
                content = response
            else:
                print(f"Unexpected response type: {type(response)}")
                return []

            from toolboxv2.mods.isaa.extras.filter import after_format
            # Parse JSON and create concepts
            concept_data = after_format(content)
            concepts = []

            for concept_info in concept_data.get("concepts", []):
                concept = Concept(
                    name=concept_info["name"],
                    category=concept_info.get("category", "N/A"),
                    relationships={k: set(v) for k, v in concept_info.get("relationships", {}).items()},
                    importance_score=concept_info.get("importance_score", 0.1),
                    context_snippets=concept_info.get("context_snippets", "N/A"),
                    metadata=metadata
                )
                concepts.append(concept)
                self.concept_graph.add_concept(concept)

            return concepts

        except Exception:
            # NOTE(review): i__[2] presumably counts parse failures -- confirm.
            i__[2] +=1
            return []

    async def process_chunks(self, chunks: list[Chunk]) -> None:
        """
        Process all chunks in batch to extract and store concepts.
        Each chunk's metadata will be updated with the concept names and relationships.
        """
        # Gather all texts from the chunks.
        texts = [chunk.text for chunk in chunks]
        # Call extract_concepts once with all texts.
        all_concepts = await self.extract_concepts(texts, [chunk.metadata for chunk in chunks])

        # Update each chunk's metadata with its corresponding concepts.
        for chunk, concepts in zip(chunks, all_concepts, strict=False):
            chunk.metadata["concepts"] = [c.name for c in concepts]
            chunk.metadata["concept_relationships"] = {
                c.name: {k: list(v) for k, v in c.relationships.items()}
                for c in concepts
            }

    async def query_concepts(self, query: str) -> dict[str, Any]:
        """Query the concept graph based on natural language query"""

        system_prompt = """
        Convert the natural language query about concepts into a structured format that specifies:
        1. Main concepts of interest
        2. Desired relationship types
        3. Any category filters
        4. Importance threshold

        Format as JSON.
        """

        prompt = f"""
        Query: {query}

        Convert to this JSON structure:
        {{
            "target_concepts": ["concept1", "concept2"],
            "relationship_types": ["type1", "type2"],
            "categories": ["category1", "category2"],
            "min_importance": 0.0
        }}
        """

        try:
            from toolboxv2.mods.isaa.extras.adapter import litellm_complete
            response = await litellm_complete(
                model_name=self.kb.model_name,
                prompt=prompt,
                system_prompt=system_prompt,
                response_format=TConcept
            )

            # NOTE(review): assumes litellm_complete returns a JSON string
            # here (unlike extract_concepts, which post-processes objects)
            # -- confirm.
            query_params = json.loads(response)

            results = {
                "concepts": {},
                "relationships": [],
                "groups": []
            }

            # Find matching concepts
            for concept_name in query_params["target_concepts"]:
                if concept_name in self.concept_graph.concepts:
                    concept = self.concept_graph.concepts[concept_name]
                    if concept.importance_score >= query_params["min_importance"]:
                        results["concepts"][concept_name] = {
                            "category": concept.category,
                            "importance": concept.importance_score,
                            "context": concept.context_snippets
                        }

                        # Get relationships
                        for rel_type in query_params["relationship_types"]:
                            related = self.concept_graph.get_related_concepts(
                                concept_name, rel_type
                            )
                            for related_concept in related:
                                results["relationships"].append({
                                    "from": concept_name,
                                    "to": related_concept,
                                    "type": rel_type
                                })

            # Group concepts by category
            category_groups = defaultdict(list)
            for concept_name, concept_info in results["concepts"].items():
                category_groups[concept_info["category"]].append(concept_name)
            results["groups"] = [
                {"category": cat, "concepts": concepts}
                for cat, concepts in category_groups.items()
            ]

            return results

        except Exception as e:
            print(f"Error querying concepts: {str(e)}")
            return {"concepts": {}, "relationships": [], "groups": []}
extract_concepts(texts, metadatas) async

Extract concepts from texts using concurrent processing with rate limiting. Requests are made at the specified rate while responses are processed asynchronously.

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
async def extract_concepts(self, texts: list[str], metadatas: list[dict[str, Any]]) -> list[list[Concept]]:
    """
    Extract concepts from texts using concurrent processing with rate limiting.
    Requests are made at the specified rate while responses are processed asynchronously.
    """
    # Ensure metadatas list matches texts length
    # (pads with empty dicts; a longer metadatas list is left as-is).
    metadatas = metadatas + [{}] * (len(texts) - len(metadatas))

    # Initialize rate limiter
    rate_limiter = DynamicRateLimiter()

    system_prompt = (
        "Analyze the given text and extract key concepts and their relationships. For each concept:\n"
        "1. Identify the concept name and category (technical, domain, method, property, ...)\n"
        "2. Determine relationships with other concepts (uses, part_of, similar_to, depends_on, ...)\n"
        "3. Assess importance (0-1 score) based on centrality to the text\n"
        "4. Extract relevant context snippets\n"
        "5. Max 5 Concepts!\n"
        "only return in json format!\n"
        """{"concepts": [{
            "name": "concept_name",
            "category": "category_name",
            "relationships": {
                "relationship_type": ["related_concept1", "related_concept2"]
            },
            "importance_score": 0.0,
            "context_snippets": ["relevant text snippet"]
        }]}\n"""
    )

    # Prepare all requests
    requests = [
        (idx, f"Text to Convert in to JSON structure:\n{text}", system_prompt, metadata)
        for idx, (text, metadata) in enumerate(zip(texts, metadatas, strict=False))
    ]

    async def process_single_request(idx: int, prompt: str, system_prompt: str, metadata: dict[str, Any]):
        """Process a single request with rate limiting"""
        try:
            from toolboxv2.mods.isaa.extras.adapter import litellm_complete
            # Wait for rate limit
            await rate_limiter.acquire()
            # NOTE(review): i__ appears to be a module-level request counter
            # -- confirm its definition.
            i__[1] += 1
            # Make API call without awaiting the response
            response_future = litellm_complete(
                prompt=prompt,
                system_prompt=system_prompt,
                response_format=Concepts,
                model_name=self.kb.model_name,
                fallbacks=["groq/gemma2-9b-it"] +
                          [m for m in os.getenv("FALLBACKS_MODELS_PREM", '').split(',') if m]
            )

            return idx, response_future

        except Exception as e:
            print(f"Error initiating request {idx}: {str(e)}")
            return idx, None

    async def process_response(idx: int, response_future) -> list[Concept]:
        """Process the response once it's ready"""
        try:
            if response_future is None:
                return []

            response = await response_future
            return await self._process_response(response, metadatas[idx])

        except Exception as e:
            print(f"Error processing response {idx}: {str(e)}")
            return []

    # Create tasks for all requests
    request_tasks = []
    batch_size = self.kb.batch_size

    rate_limiter.update_rate(self.requests_per_second)

    for batch_start in range(0, len(requests), batch_size):
        batch = requests[batch_start:batch_start + batch_size]

        # Create tasks for the batch
        batch_tasks = [
            process_single_request(idx, prompt, sys_prompt, meta)
            for idx, prompt, sys_prompt, meta in batch
        ]
        request_tasks.extend(batch_tasks)

    # Execute all requests with rate limiting
    request_results = await asyncio.gather(*request_tasks)

    # Process responses as they complete
    response_tasks = [
        process_response(idx, response_future)
        for idx, response_future in request_results
    ]

    # Gather all results
    all_results = await asyncio.gather(*response_tasks)

    # Sort results by original index
    # (gather preserves input order, so this re-lists results per text).
    sorted_results = [[] for _ in texts]
    for idx, concepts in enumerate(all_results):
        sorted_results[idx] = concepts

    return sorted_results
process_chunks(chunks) async

Process all chunks in batch to extract and store concepts. Each chunk's metadata will be updated with the concept names and relationships.

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
async def process_chunks(self, chunks: list[Chunk]) -> None:
    """
    Process all chunks in batch to extract and store concepts.
    Each chunk's metadata will be updated with the concept names and relationships.
    """
    # Gather all texts from the chunks.
    texts = [chunk.text for chunk in chunks]
    # Call extract_concepts once with all texts.
    all_concepts = await self.extract_concepts(texts, [chunk.metadata for chunk in chunks])

    # Update each chunk's metadata with its corresponding concepts.
    # Relationship sets are converted to lists so the metadata stays
    # JSON-serializable.
    for chunk, concepts in zip(chunks, all_concepts, strict=False):
        chunk.metadata["concepts"] = [c.name for c in concepts]
        chunk.metadata["concept_relationships"] = {
            c.name: {k: list(v) for k, v in c.relationships.items()}
            for c in concepts
        }
query_concepts(query) async

Query the concept graph based on natural language query

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
async def query_concepts(self, query: str) -> dict[str, Any]:
    """Query the concept graph based on natural language query.

    Returns a dict with keys "concepts", "relationships" and "groups";
    all three are empty on any failure.
    """

    system_prompt = """
    Convert the natural language query about concepts into a structured format that specifies:
    1. Main concepts of interest
    2. Desired relationship types
    3. Any category filters
    4. Importance threshold

    Format as JSON.
    """

    prompt = f"""
    Query: {query}

    Convert to this JSON structure:
    {{
        "target_concepts": ["concept1", "concept2"],
        "relationship_types": ["type1", "type2"],
        "categories": ["category1", "category2"],
        "min_importance": 0.0
    }}
    """

    try:
        from toolboxv2.mods.isaa.extras.adapter import litellm_complete
        response = await litellm_complete(
            model_name=self.kb.model_name,
            prompt=prompt,
            system_prompt=system_prompt,
            response_format=TConcept
        )

        # NOTE(review): assumes litellm_complete returns a JSON string here
        # -- confirm.
        query_params = json.loads(response)

        results = {
            "concepts": {},
            "relationships": [],
            "groups": []
        }

        # Find matching concepts
        for concept_name in query_params["target_concepts"]:
            if concept_name in self.concept_graph.concepts:
                concept = self.concept_graph.concepts[concept_name]
                if concept.importance_score >= query_params["min_importance"]:
                    results["concepts"][concept_name] = {
                        "category": concept.category,
                        "importance": concept.importance_score,
                        "context": concept.context_snippets
                    }

                    # Get relationships
                    for rel_type in query_params["relationship_types"]:
                        related = self.concept_graph.get_related_concepts(
                            concept_name, rel_type
                        )
                        for related_concept in related:
                            results["relationships"].append({
                                "from": concept_name,
                                "to": related_concept,
                                "type": rel_type
                            })

        # Group concepts by category
        category_groups = defaultdict(list)
        for concept_name, concept_info in results["concepts"].items():
            category_groups[concept_info["category"]].append(concept_name)
        results["groups"] = [
            {"category": cat, "concepts": concepts}
            for cat, concepts in category_groups.items()
        ]

        return results

    except Exception as e:
        print(f"Error querying concepts: {str(e)}")
        return {"concepts": {}, "relationships": [], "groups": []}
ConceptGraph

Manages concept relationships and hierarchies

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
class ConceptGraph:
    """Manages concept relationships and hierarchies"""

    def __init__(self):
        self.concepts: dict[str, Concept] = {}

    def add_concept(self, concept: Concept):
        """Add or update a concept in the graph"""
        if concept.name.lower() in self.concepts:
            # Merge relationships and context
            existing = self.concepts[concept.name.lower()]
            for rel_type, related in concept.relationships.items():
                if rel_type not in existing.relationships:
                    existing.relationships[rel_type] = set()
                existing.relationships[rel_type].update(related)
            existing.context_snippets.extend(concept.context_snippets)
            # Update importance score with rolling average
            existing.importance_score = (existing.importance_score + concept.importance_score) / 2
        else:
            self.concepts[concept.name.lower()] = concept

    def get_related_concepts(self, concept_name: str, relationship_type: str | None = None) -> set[str]:
        """Get related concepts, optionally filtered by relationship type"""
        if concept_name not in self.concepts:
            return set()

        concept = self.concepts[concept_name.lower()]
        if relationship_type:
            return concept.relationships.get(relationship_type, set())

        related = set()
        for relations in concept.relationships.values():
            related.update(relations)
        return related


    def convert_to_networkx(self) -> nx.DiGraph:
        """Convert ConceptGraph to NetworkX graph with layout"""
        print(f"Converting to NetworkX graph with {len(self.concepts.values())} concepts")

        G = nx.DiGraph()

        if len(self.concepts.values()) == 0:
            return G

        for concept in self.concepts.values():
            cks = '\n - '.join(concept.context_snippets[:4])
            G.add_node(
                concept.name,
                size=concept.importance_score * 10,
                group=concept.category,
                title=f"""
                    {concept.name}
                    Category: {concept.category}
                    Importance: {concept.importance_score:.2f}
                    Context: \n - {cks}
                    """
            )

            for rel_type, targets in concept.relationships.items():
                for target in targets:
                    G.add_edge(concept.name, target, label=rel_type, title=rel_type)

        return G
add_concept(concept)

Add or update a concept in the graph

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
174
175
176
177
178
179
180
181
182
183
184
185
186
187
def add_concept(self, concept: Concept):
    """Add or update a concept in the graph.

    Concepts are keyed by their lower-cased name, making lookups
    case-insensitive. When a concept with the same name already exists,
    the incoming concept is merged into the stored entry instead of
    replacing it.
    """
    if concept.name.lower() in self.concepts:
        # Merge relationships and context
        existing = self.concepts[concept.name.lower()]
        for rel_type, related in concept.relationships.items():
            if rel_type not in existing.relationships:
                existing.relationships[rel_type] = set()
            existing.relationships[rel_type].update(related)
        existing.context_snippets.extend(concept.context_snippets)
        # Update importance score with rolling average
        # (each merge weighs the newcomer as much as the accumulated history)
        existing.importance_score = (existing.importance_score + concept.importance_score) / 2
    else:
        self.concepts[concept.name.lower()] = concept
convert_to_networkx()

Convert ConceptGraph to NetworkX graph with layout

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
def convert_to_networkx(self) -> nx.DiGraph:
    """Convert ConceptGraph to NetworkX graph with layout.

    Builds a directed graph with one node per stored concept — sized by
    importance, grouped by category, with a hover tooltip — and one edge
    per (relationship type, target) pair.

    Returns:
        nx.DiGraph: The populated graph; empty when no concepts exist.
    """
    print(f"Converting to NetworkX graph with {len(self.concepts.values())} concepts")

    G = nx.DiGraph()

    if len(self.concepts.values()) == 0:
        return G

    for concept in self.concepts.values():
        # Tooltip shows at most the first four context snippets.
        cks = '\n - '.join(concept.context_snippets[:4])
        G.add_node(
            concept.name,
            size=concept.importance_score * 10,
            group=concept.category,
            title=f"""
                {concept.name}
                Category: {concept.category}
                Importance: {concept.importance_score:.2f}
                Context: \n - {cks}
                """
        )

        # Edge targets may name concepts that were never added as nodes;
        # networkx creates such nodes implicitly on add_edge.
        for rel_type, targets in concept.relationships.items():
            for target in targets:
                G.add_edge(concept.name, target, label=rel_type, title=rel_type)

    return G
get_related_concepts(concept_name, relationship_type=None)

Get related concepts, optionally filtered by relationship type

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
189
190
191
192
193
194
195
196
197
198
199
200
201
def get_related_concepts(self, concept_name: str, relationship_type: str | None = None) -> set[str]:
    """Get related concepts, optionally filtered by relationship type"""
    if concept_name not in self.concepts:
        return set()

    concept = self.concepts[concept_name.lower()]
    if relationship_type:
        return concept.relationships.get(relationship_type, set())

    related = set()
    for relations in concept.relationships.values():
        related.update(relations)
    return related
Concepts

Bases: BaseModel

Represents a collection of key concepts.

Attributes:

Name Type Description
concepts List[rConcept]

A list of rConcept instances, each representing an individual key concept.

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
102
103
104
105
106
107
108
109
class Concepts(BaseModel):
    """
    Represents a collection of key concepts.

    Attributes:
        concepts (list[rConcept]): A list of rConcept instances, each representing an individual key concept.
    """
    concepts: list[rConcept]
DataModel

Bases: BaseModel

The main data model that encapsulates the overall analysis.

Attributes:

Name Type Description
main_summary str

A detailed overview summarizing the key findings and relations, formatted as a Markdown string.

concept_analysis ConceptAnalysis

An instance containing the analysis of key concepts.

topic_insights TopicInsights

An instance containing insights regarding the topics.

relevance_assessment RelevanceAssessment

An instance assessing the relevance and alignment of the query.

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
153
154
155
156
157
158
159
160
161
162
163
164
165
166
class DataModel(BaseModel):
    """
    The main data model that encapsulates the overall analysis.

    Attributes:
        main_summary (str): A detailed overview summarizing the key findings and relations, formatted as a Markdown string.
        concept_analysis (ConceptAnalysis): An instance containing the analysis of key concepts.
        topic_insights (TopicInsights): An instance containing insights regarding the topics.
        relevance_assessment (RelevanceAssessment): An instance assessing the relevance and alignment of the query.
    """
    main_summary: str
    concept_analysis: ConceptAnalysis
    topic_insights: TopicInsights
    relevance_assessment: RelevanceAssessment
DynamicRateLimiter
Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
class DynamicRateLimiter:
    """Async rate limiter whose rate can be adjusted at runtime.

    ``update_rate`` sets the minimum interval between requests and
    ``acquire`` sleeps just long enough to honour that interval. An
    ``asyncio.Lock`` serializes concurrent acquirers so waits accumulate.
    """

    def __init__(self):
        self.last_request_time = 0.0
        # Bug fix: min_interval was previously only created in update_rate(),
        # so calling acquire() before update_rate() raised AttributeError.
        # Default to 0.0 (no throttling) until a rate is configured.
        self.min_interval = 0.0
        self._lock = asyncio.Lock()

    def update_rate(self, requests_per_second: float):
        """Update rate limit dynamically.

        Args:
            requests_per_second: Target rate; non-positive values disable
                requests entirely (infinite interval).
        """
        self.min_interval = 1.0 / requests_per_second if requests_per_second > 0 else float('inf')

    async def acquire(self):
        """Acquire permission to make a request, sleeping if needed."""
        async with self._lock:
            current_time = time.time()
            time_since_last = current_time - self.last_request_time
            if time_since_last < self.min_interval:
                wait_time = self.min_interval - time_since_last
                await asyncio.sleep(wait_time)
            self.last_request_time = time.time()
acquire() async

Acquire permission to make a request

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
266
267
268
269
270
271
272
273
274
async def acquire(self):
    """Acquire permission to make a request.

    Sleeps just long enough to keep successive requests at least
    ``self.min_interval`` seconds apart; the lock serializes concurrent
    callers so their waits accumulate correctly.

    NOTE(review): assumes ``update_rate`` was called first so that
    ``self.min_interval`` exists — confirm callers guarantee this.
    """
    async with self._lock:
        current_time = time.time()
        time_since_last = current_time - self.last_request_time
        if time_since_last < self.min_interval:
            wait_time = self.min_interval - time_since_last
            await asyncio.sleep(wait_time)
        self.last_request_time = time.time()
update_rate(requests_per_second)

Update rate limit dynamically

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
262
263
264
def update_rate(self, requests_per_second: float):
    """Update rate limit dynamically.

    A non-positive rate yields an infinite interval, effectively
    blocking all future requests.
    """
    if requests_per_second > 0:
        self.min_interval = 1.0 / requests_per_second
    else:
        self.min_interval = float('inf')
GraphVisualizer
Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
class GraphVisualizer:
    """Renders a NetworkX concept graph to an interactive PyVis HTML page."""

    @staticmethod
    def visualize(nx_graph: nx.DiGraph, output_file: str = "concept_graph.html", get_output=False):
        """Create interactive visualization using PyVis.

        Args:
            nx_graph: Directed concept graph to render.
            output_file: Path of the HTML file to write.
            get_output: When True, return the generated HTML source and
                delete the file afterwards.

        Returns:
            The HTML source as a string when ``get_output`` is True,
            otherwise ``None``.
        """
        from pyvis.network import Network
        net = Network(
            height="800px",
            width="100%",
            notebook=False,
            directed=True,
            bgcolor="#1a1a1a",
            font_color="white"
        )

        net.from_nx(nx_graph)

        net.save_graph(output_file)
        print(f"Graph saved to {output_file} Open in browser to view.", len(nx_graph))
        if get_output:
            # Bug fix: the handle from open(...).read() was never closed;
            # use a context manager so it is released before os.remove()
            # (required on Windows, tidy everywhere else).
            with open(output_file, encoding="utf-8") as f:
                c = f.read()
            os.remove(output_file)
            return c
visualize(nx_graph, output_file='concept_graph.html', get_output=False) staticmethod

Create interactive visualization using PyVis

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
@staticmethod
def visualize(nx_graph: nx.DiGraph, output_file: str = "concept_graph.html", get_output=False):
    """Create interactive visualization using PyVis.

    Args:
        nx_graph: Directed concept graph to render.
        output_file: Path of the HTML file to write.
        get_output: When True, return the generated HTML source and
            delete the file afterwards.

    Returns:
        The HTML source as a string when ``get_output`` is True,
        otherwise ``None``.
    """
    from pyvis.network import Network
    net = Network(
        height="800px",
        width="100%",
        notebook=False,
        directed=True,
        bgcolor="#1a1a1a",
        font_color="white"
    )

    net.from_nx(nx_graph)

    net.save_graph(output_file)
    print(f"Graph saved to {output_file} Open in browser to view.", len(nx_graph))
    if get_output:
        # NOTE(review): the file handle from open() is never closed here;
        # on Windows the following os.remove can fail while it is open.
        c = open(output_file, encoding="utf-8").read()
        os.remove(output_file)
        return c
KnowledgeBase
Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
 607
 608
 609
 610
 611
 612
 613
 614
 615
 616
 617
 618
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
class KnowledgeBase:
    def __init__(self, embedding_dim: int = 256, similarity_threshold: float = 0.61, batch_size: int = 64,
                 n_clusters: int = 4, deduplication_threshold: float = 0.85, model_name=os.getenv("SUMMARYMODEL"),
                 embedding_model=os.getenv("DEFAULTMODELEMBEDDING"),
                 vis_class:str | None = "FaissVectorStore",
                 vis_kwargs:dict[str, Any] | None=None,
                 requests_per_second=85.,
                 chunk_size: int = 3600,
                 chunk_overlap: int = 130,
                 separator: str = "\n"
                 ):
        """Initialize the knowledge base with given parameters.

        Args:
            embedding_dim: Dimensionality of the embedding vectors.
            similarity_threshold: Minimum similarity for two chunks to be
                linked in the similarity graph.
            batch_size: Number of texts embedded per request batch (also the
                buffer size used by ``add_data``).
            n_clusters: Target cluster count for topic grouping.
            deduplication_threshold: Similarity above which chunks are
                treated as duplicates.
            model_name: Summary LLM name (defaults to env SUMMARYMODEL).
            embedding_model: Embedding model name (defaults to env
                DEFAULTMODELEMBEDDING).
            vis_class: Vector-store backend name; see ``init_vis``.
            vis_kwargs: Backend-specific constructor arguments.
            requests_per_second: Rate limit handed to the concept extractor.
            chunk_size: Maximum characters per text chunk.
            chunk_overlap: Overlap between consecutive chunks.
            separator: Preferred split boundary for the text splitter.
        """

        # SHA-256 hashes of stored texts; used for exact-duplicate filtering.
        self.existing_hashes: set[str] = set()
        self.embedding_model = embedding_model
        self.embedding_dim = embedding_dim
        self.similarity_threshold = similarity_threshold
        self.deduplication_threshold = deduplication_threshold
        # This specific model needs far more conservative batching/rate limits.
        if model_name == "openrouter/mistralai/mistral-nemo":
            batch_size = 9
            requests_per_second = 1.5
        self.batch_size = batch_size
        self.n_clusters = n_clusters
        self.model_name = model_name
        # Buffer of (text, metadata) pairs accumulated by add_data() until a
        # full batch is available.
        self.sto: list = []

        self.text_splitter = TextSplitter(chunk_size=chunk_size,chunk_overlap=chunk_overlap, separator=separator)
        self.similarity_graph = {}
        self.concept_extractor = ConceptExtractor(self, requests_per_second)

        self.vis_class = None
        self.vis_kwargs = None
        self.vdb = None
        self.init_vis(vis_class, vis_kwargs)

    def init_vis(self, vis_class, vis_kwargs):
        """Instantiate the vector-store backend named by ``vis_class``.

        Falls back to "FaissVectorStore" when ``vis_class`` is None. When
        ``vis_kwargs`` is None, backend-appropriate defaults derived from
        ``self.embedding_dim`` are used.

        NOTE(review): an unrecognized ``vis_class`` string leaves
        ``self.vdb`` as None — confirm callers always pass a known name.
        """
        if vis_class is None:
            vis_class = "FaissVectorStore"
        if vis_class == "FaissVectorStore":
            if vis_kwargs is None:
                vis_kwargs = {
                    "dimension": self.embedding_dim
                }
            self.vdb = FaissVectorStore(**vis_kwargs)
        else:
            # Alternative backends are imported lazily so the heavy
            # taichi/numba stores are only loaded when requested.
            from toolboxv2.mods.isaa.base.VectorStores.taichiNumpyNumbaVectorStores import (
                EnhancedVectorStore,
                FastVectorStore1,
                FastVectorStoreO,
                NumpyVectorStore,
                VectorStoreConfig,
            )
        if vis_class == "FastVectorStoreO":
            if vis_kwargs is None:
                vis_kwargs = {
                    "embedding_size": self.embedding_dim
                }
            self.vdb = FastVectorStoreO(**vis_kwargs)
        if vis_class == "EnhancedVectorStore":
            if vis_kwargs is None:
                vis_kwargs = {
                    "dimension": self.embedding_dim
                }
            # EnhancedVectorStore takes a config object rather than kwargs.
            vis_kwargs = VectorStoreConfig(**vis_kwargs)
            self.vdb = EnhancedVectorStore(vis_kwargs)
        if vis_class == "FastVectorStore1":
            self.vdb = FastVectorStore1()
        if vis_class == "NumpyVectorStore":
            self.vdb = NumpyVectorStore()

        # Remember the selection so the store can be re-created later.
        self.vis_class = vis_class
        self.vis_kwargs = vis_kwargs


    @staticmethod
    def compute_hash(text: str) -> str:
        """Compute SHA-256 hash of text"""
        return hashlib.sha256(text.encode('utf-8', errors='ignore')).hexdigest()

    async def _get_embeddings(self, texts: list[str]) -> np.ndarray:
        """Get normalized embeddings in batches.

        Splits ``texts`` into batches of ``self.batch_size``, embeds all
        batches concurrently, and returns one L2-normalized row per input
        text (order preserved by ``asyncio.gather``).

        Raises:
            Exception: re-raised after logging when the embedding call fails.
        """
        try:
            async def process_batch(batch: list[str]) -> np.ndarray:
                from toolboxv2.mods.isaa.extras.adapter import litellm_embed
                # print("Processing", batch)
                embeddings = await litellm_embed(texts=batch, model=self.embedding_model)
                return normalize_vectors(embeddings)

            tasks = []
            for i in range(0, len(texts), self.batch_size):
                batch = texts[i:i + self.batch_size]
                tasks.append(process_batch(batch))

            embeddings = await asyncio.gather(*tasks)
            # Module-level counter: running total of texts embedded.
            i__[0] += len(texts)
            return np.vstack(embeddings)
        except Exception as e:
            get_logger().error(f"Error generating embeddings: {str(e)}")
            raise



    def _remove_similar_chunks(self, threshold: float | None = None) -> int:
        """Remove chunks that are too similar to each other.

        Performs greedy deduplication over all stored chunks: earlier chunks
        are kept, later chunks whose similarity to a kept chunk meets the
        threshold are dropped. Rebuilds the vector index when anything was
        removed.

        Args:
            threshold: Similarity cutoff; defaults to
                ``self.deduplication_threshold``.

        Returns:
            Number of chunks removed.

        Raises:
            Exception: re-raised after logging on any failure.
        """
        if len(self.vdb.chunks) < 2:
            return 0

        if threshold is None:
            threshold = self.deduplication_threshold

        try:
            # Get all embeddings
            embeddings = np.vstack([c.embedding for c in self.vdb.chunks])
            n = len(embeddings)

            # Compute similarity matrix (dot products; embeddings are
            # normalized upstream, so this is cosine similarity).
            similarities = np.dot(embeddings, embeddings.T)

            # Create mask for chunks to keep
            keep_mask = np.ones(n, dtype=bool)

            # Iterate through chunks
            for i in range(n):
                if not keep_mask[i]:
                    continue

                # Find chunks that are too similar to current chunk
                similar_indices = similarities[i] >= threshold
                similar_indices[i] = False  # Don't count self-similarity

                # Mark similar chunks for removal
                keep_mask[similar_indices] = False

            # Keep only unique chunks
            unique_chunks = [chunk for chunk, keep in zip(self.vdb.chunks, keep_mask, strict=False) if keep]
            removed_count = len(self.vdb.chunks) - len(unique_chunks)

            # Update chunks and hashes
            self.vdb.chunks = unique_chunks
            self.existing_hashes = {chunk.content_hash for chunk in self.vdb.chunks}

            # Rebuild index if chunks were removed
            if removed_count > 0:
                self.vdb.rebuild_index()


            return removed_count

        except Exception as e:
            get_logger().error(f"Error removing similar chunks: {str(e)}")
            raise

    async def _add_data(
        self,
        texts: list[str],
        metadata: list[dict[str, Any]] | None = None,
    ) -> tuple[int, int]:
        """
        Process and add new data to the knowledge base.

        Filters exact duplicates by content hash, embeds the remaining
        texts, filters near-duplicates against the existing store, adds the
        surviving chunks, and triggers deduplication plus concept extraction.

        Returns: Tuple of (added_count, duplicate_count); (-1, -1) for empty input.
        """
        if len(texts) == 0:
            return -1, -1
        try:
            # Bug fix: the signature allows metadata=None but the zip below
            # required a list; default to one empty dict per text.
            if metadata is None:
                metadata = [{}] * len(texts)

            # Compute hashes and filter exact duplicates
            hashes = [self.compute_hash(text) for text in texts]
            unique_data = []
            for t, m, h in zip(texts, metadata, hashes, strict=False):
                if h in self.existing_hashes:
                    continue
                # Update existing hashes
                self.existing_hashes.add(h)
                unique_data.append((t, m, h))

            if not unique_data:
                return 0, len(texts)

            # Bug fix: embeddings were previously computed for ALL input
            # texts but indexed by position in unique_data below; once exact
            # duplicates had been filtered out the indices no longer lined
            # up and chunks could be paired with the wrong embedding.
            # Embed only the unique texts so positions match.
            embeddings = await self._get_embeddings([t for t, _m, _h in unique_data])

            texts = []
            metadata = []
            hashes = []
            embeddings_final = []
            if len(self.vdb.chunks):
                # Near-duplicate filter against chunks already in the store:
                # skip any text with more than two close neighbours.
                for i, d in enumerate(unique_data):
                    c = self.vdb.search(embeddings[i], 5, self.deduplication_threshold)
                    if len(c) > 2:
                        continue
                    t, m, h = d
                    texts.append(t)
                    metadata.append(m)
                    hashes.append(h)
                    embeddings_final.append(embeddings[i])

            else:
                texts, metadata, hashes = zip(*unique_data, strict=False)
                embeddings_final = embeddings

            if not texts:  # All were similar to existing chunks
                return 0, len(unique_data)

            # Create and add new chunks
            new_chunks = [
                Chunk(text=t, embedding=e, metadata=m, content_hash=h)
                for t, e, m, h in zip(texts, embeddings_final, metadata, hashes, strict=False)
            ]

            # Update index with all new chunk embeddings at once
            if new_chunks:
                all_embeddings = np.vstack([c.embedding for c in new_chunks])
                self.vdb.add_embeddings(all_embeddings, new_chunks)

            # Remove similar chunks from the entire collection
            removed = self._remove_similar_chunks()
            get_logger().info(f"Removed {removed} similar chunks during deduplication")

            if len(new_chunks) - removed > 0:
                # Process new chunks for concepts
                await self.concept_extractor.process_chunks(new_chunks)
            print("[total, calls, errors]", i__)

            return len(new_chunks) - removed, len(texts) - len(new_chunks) + removed

        except Exception as e:
            get_logger().error(f"Error adding data: {str(e)}")
            raise


    async def add_data(
        self,
        texts: list[str],
        metadata: list[dict[str, Any]] | None = None, direct: bool = False
    ) -> tuple[int, int]:
        """Enhanced version with smart splitting and clustering.

        Normalizes single-string input, buffers small single texts until a
        full batch is available (unless ``direct``), splits large texts into
        chunks with per-chunk metadata, and delegates to ``_add_data``.

        Returns:
            (added_count, duplicate_count) from ``_add_data``; (-1, -1) when
            the input was merely buffered.

        Raises:
            ValueError: when texts and metadata lengths differ.
        """
        if isinstance(texts, str):
            texts = [texts]
        if metadata is None:
            metadata = [{}] * len(texts)
        if isinstance(metadata, dict):
            metadata = [metadata]
        if len(texts) != len(metadata):
            raise ValueError("Length of texts and metadata must match")

        # Buffer small single texts until a full batch accumulates.
        if not direct and len(texts) == 1 and len(texts[0]) < 10_000:
            if len(self.sto) < self.batch_size and len(texts) == 1:
                self.sto.append((texts[0], metadata[0]))
                return -1, -1
            if len(self.sto) >= self.batch_size:
                # Flush the buffer into this call (metadata entries are
                # wrapped in a list and unwrapped again below).
                for t, m in self.sto:
                    texts.append(t)
                    metadata.append([m])
                self.sto = []

        # Split large texts
        split_texts = []
        split_metadata = []

        # Bug fix: this was `while Spinner(...)`, which only tested the
        # spinner's truthiness and never entered/exited the context manager;
        # `with` runs the block once and cleans the spinner up properly.
        with Spinner("Saving Data to Memory", symbols='t'):

            for idx, text in enumerate(texts):
                chunks = self.text_splitter.split_text(text)
                split_texts.extend(chunks)

                # Adjust metadata for splits
                meta = metadata[idx] if metadata else {}
                if isinstance(meta, list):
                    meta = meta[0]
                for i, _chunk in enumerate(chunks):
                    chunk_meta = meta.copy()
                    chunk_meta.update({
                        'chunk_index': i,
                        'total_chunks': len(chunks),
                        'original_text_id': idx
                    })
                    split_metadata.append(chunk_meta)

            return await self._add_data(split_texts, split_metadata)

    def _update_similarity_graph(self, embeddings: np.ndarray, chunk_ids: list[int]):
        """Update similarity graph for connected information detection"""
        similarities = np.dot(embeddings, embeddings.T)

        for i in range(len(chunk_ids)):
            for j in range(i + 1, len(chunk_ids)):
                if similarities[i, j] >= self.similarity_threshold:
                    id1, id2 = chunk_ids[i], chunk_ids[j]
                    if id1 not in self.similarity_graph:
                        self.similarity_graph[id1] = set()
                    if id2 not in self.similarity_graph:
                        self.similarity_graph[id2] = set()
                    self.similarity_graph[id1].add(id2)
                    self.similarity_graph[id2].add(id1)

    async def retrieve(
        self,
        query: str="",
        query_embedding: np.ndarray | None = None,
        k: int = 5,
        min_similarity: float = 0.2,
        include_connected: bool = True
    ) -> list[Chunk]:
        """Enhanced retrieval with connected information.

        Args:
            query: Query text; only embedded when ``query_embedding`` is None.
            query_embedding: Optional precomputed query vector.
            k: Maximum number of primary results (capped at store size).
            min_similarity: Similarity floor for the vector-store search.
            include_connected: Also include chunks linked to the hits in
                the similarity graph.

        Returns:
            Chunks sorted by similarity to the query — at most ``k`` primary
            hits, or up to ``2*k`` when connected chunks are included.
        """
        if query_embedding is None:
            query_embedding = (await self._get_embeddings([query]))[0]
        k = min(k, len(self.vdb.chunks))
        if k <= 0:
            return []
        initial_results = self.vdb.search(query_embedding, k, min_similarity)

        if not include_connected or not initial_results:
            return initial_results

        # Find connected chunks
        # (note: list.index is a linear scan per result — fine for small stores)
        connected_chunks = set()
        for chunk in initial_results:
            chunk_id = self.vdb.chunks.index(chunk)
            if chunk_id in self.similarity_graph:
                connected_chunks.update(self.similarity_graph[chunk_id])

        # Add connected chunks to results
        all_chunks = self.vdb.chunks
        additional_results = [all_chunks[i] for i in connected_chunks
                              if all_chunks[i] not in initial_results]

        # Sort by similarity to query
        all_results = initial_results + additional_results

        return sorted(
            all_results,
            key=lambda x: np.dot(x.embedding, query_embedding),
            reverse=True
        )[:k * 2]  # Return more results when including connected information

    async def forget_irrelevant(self, irrelevant_concepts: list[str], similarity_threshold: float | None=None) -> int:
        """
        Remove chunks similar to irrelevant concepts.

        Embeds the given concept strings, drops every chunk whose embedding
        is at least ``similarity_threshold`` similar to any of them, and also
        deletes the dropped chunks' concepts from the concept graph.

        Args:
            irrelevant_concepts: Concept strings describing what to forget.
            similarity_threshold: Cutoff; defaults to ``self.similarity_threshold``.

        Returns: Number of chunks removed.

        Raises:
            Exception: re-raised after logging on any failure.
        """
        if not irrelevant_concepts:
            return 0

        if similarity_threshold is None:
            similarity_threshold = self.similarity_threshold

        try:
            irrelevant_embeddings = await self._get_embeddings(irrelevant_concepts)
            initial_count = len(self.vdb.chunks)

            # Predicate with a side effect: chunks that are filtered out also
            # have their associated concepts deleted from the concept graph.
            def is_relevant(chunk: Chunk) -> bool:
                similarities = np.dot(chunk.embedding, irrelevant_embeddings.T)
                do_keep = np.max(similarities) < similarity_threshold
                if do_keep:
                    return True
                for c in chunk.metadata.get("concepts", []):
                    if c in self.concept_extractor.concept_graph.concepts:
                        del self.concept_extractor.concept_graph.concepts[c]
                return False

            relevant_chunks = [chunk for chunk in self.vdb.chunks if is_relevant(chunk)]
            self.vdb.chunks = relevant_chunks
            self.existing_hashes = {chunk.content_hash for chunk in self.vdb.chunks}
            self.vdb.rebuild_index()

            return initial_count - len(self.vdb.chunks)

        except Exception as e:
            get_logger().error(f"Error forgetting irrelevant concepts: {str(e)}")
            raise

    ## ----------------------------------------------------------------

    def _cluster_chunks(
        self,
        chunks: list[Chunk],
        query_embedding: np.ndarray | None = None,
        min_cluster_size: int = 2,
        min_samples: int = 1,
        max_clusters: int = 10
    ) -> dict[int, list[Chunk]]:
        """
        Enhanced clustering of chunks into topics with query awareness
        and dynamic parameter adjustment.

        Returns a dict mapping cluster index (best-scoring first) to the
        chunks in that cluster. Falls back to ``_fallback_clustering`` when
        scikit-learn is unavailable or no acceptable clustering is found.
        """
        if len(chunks) < 2:
            return {0: chunks}

        embeddings = np.vstack([chunk.embedding for chunk in chunks])

        # Normalize embeddings for cosine similarity
        embeddings = normalize_vectors(embeddings)

        # If query is provided, weight embeddings by query relevance
        if query_embedding is not None:
            query_similarities = np.dot(embeddings, query_embedding)
            # Apply soft weighting to maintain structure while considering query relevance
            embeddings = embeddings * query_similarities[:, np.newaxis]
            embeddings = normalize_vectors(embeddings)

        # Dynamic parameter adjustment based on dataset size
        adjusted_min_cluster_size = max(
            min_cluster_size,
            min(len(chunks) // 10, 5)  # Scale with data size, max 5
        )

        adjusted_min_samples = max(
            min_samples,
            adjusted_min_cluster_size // 2
        )

        # Try different parameter combinations for optimal clustering
        best_clusters = None
        best_score = float('-inf')

        epsilon_range = [0.2, 0.3, 0.4]
        try:
            # Bug fix: __import__('sklearn.cluster') returns the TOP-LEVEL
            # 'sklearn' package, so '.HDBSCAN' always raised AttributeError,
            # the bare except swallowed it, and the HDBSCAN path never ran.
            # Import the class directly instead.
            from sklearn.cluster import HDBSCAN
        except ImportError:
            print("install scikit-learn pip install scikit-learn for better results")
            return self._fallback_clustering(chunks, query_embedding)

        for epsilon in epsilon_range:
            clusterer = HDBSCAN(
                min_cluster_size=adjusted_min_cluster_size,
                min_samples=adjusted_min_samples,
                metric='cosine',
                cluster_selection_epsilon=epsilon
            )

            # NOTE(review): some scikit-learn versions reject metric='cosine'
            # for HDBSCAN; treat a failing fit as "no clustering for this
            # epsilon" so the fallback path can still engage.
            try:
                cluster_labels = clusterer.fit_predict(embeddings)
            except ValueError:
                continue

            # Skip if all points are noise
            if len(set(cluster_labels)) <= 1:
                continue

            # Calculate clustering quality metrics
            score = self._evaluate_clustering(
                embeddings,
                cluster_labels,
                query_embedding
            )

            if score > best_score:
                best_score = score
                best_clusters = cluster_labels

        # If no good clustering found, fall back to simpler approach
        if best_clusters is None:
            return self._fallback_clustering(chunks, query_embedding)

        # Organize chunks by cluster
        clusters: dict[int, list[Chunk]] = {}

        # Sort clusters by size and relevance
        cluster_scores = []

        for label in set(best_clusters):
            if label == -1:  # Handle noise points separately
                continue

            # Use boolean mask to select chunks for current cluster
            cluster_mask = best_clusters == label
            cluster_chunks = [chunk for chunk, is_in_cluster in zip(chunks, cluster_mask, strict=False) if is_in_cluster]

            # Skip empty clusters
            if not cluster_chunks:
                continue

            # Calculate cluster score based on size and query relevance
            score = len(cluster_chunks)
            if query_embedding is not None:
                cluster_embeddings = np.vstack([c.embedding for c in cluster_chunks])
                query_relevance = np.mean(np.dot(cluster_embeddings, query_embedding))
                score = score * (1 + query_relevance)  # Boost by relevance

            cluster_scores.append((label, score, cluster_chunks))

        # Sort clusters by score and limit to max_clusters
        cluster_scores.sort(key=lambda x: x[1], reverse=True)

        # Assign cleaned clusters
        for i, (_, _, cluster_chunks) in enumerate(cluster_scores[:max_clusters]):
            clusters[i] = cluster_chunks

        # Handle noise points by assigning to nearest cluster
        noise_chunks = [chunk for chunk, label in zip(chunks, best_clusters, strict=False) if label == -1]
        if noise_chunks:
            self._assign_noise_points(noise_chunks, clusters, query_embedding)

        return clusters

    @staticmethod
    def _evaluate_clustering(
        embeddings: np.ndarray,
        labels: np.ndarray,
        query_embedding: np.ndarray | None = None
    ) -> float:
        """
        Evaluate clustering quality using multiple metrics
        """
        if len(set(labels)) <= 1:
            return float('-inf')

        # Calculate silhouette score for cluster cohesion
        try:
            sil_score = __import__('sklearn.metrics').silhouette_score(embeddings, labels, metric='cosine')
        except:
            print("install scikit-learn pip install scikit-learn for better results")
            sil_score = 0

        # Calculate Davies-Bouldin score for cluster separation
        try:
            db_score = -__import__('sklearn.metrics').davies_bouldin_score(embeddings, labels)  # Negated as lower is better
        except:
            print("install scikit-learn pip install scikit-learn for better results")
            db_score = 0

        # Calculate query relevance if provided
        query_score = 0
        if query_embedding is not None:
            unique_labels = set(labels) - {-1}
            if unique_labels:
                query_sims = []
                for label in unique_labels:
                    cluster_mask = labels == label
                    cluster_embeddings = embeddings[cluster_mask]
                    cluster_centroid = np.mean(cluster_embeddings, axis=0)
                    query_sims.append(np.dot(cluster_centroid, query_embedding))
                query_score = np.mean(query_sims)

        # Combine scores with weights
        combined_score = (
            0.4 * sil_score +
            0.3 * db_score +
            0.3 * query_score
        )

        return combined_score

    @staticmethod
    def _fallback_clustering(
        chunks: list[Chunk],
        query_embedding: np.ndarray | None = None
    ) -> dict[int, list[Chunk]]:
        """
        Simple fallback clustering when HDBSCAN fails
        """
        if query_embedding is not None:
            # Sort by query relevance
            chunks_with_scores = [
                (chunk, np.dot(chunk.embedding, query_embedding))
                for chunk in chunks
            ]
            chunks_with_scores.sort(key=lambda x: x[1], reverse=True)
            chunks = [c for c, _ in chunks_with_scores]

        # Create fixed-size clusters
        clusters = {}
        cluster_size = max(2, len(chunks) // 5)

        for i in range(0, len(chunks), cluster_size):
            clusters[len(clusters)] = chunks[i:i + cluster_size]

        return clusters

    @staticmethod
    def _assign_noise_points(
        noise_chunks: list[Chunk],
        clusters: dict[int, list[Chunk]],
        query_embedding: np.ndarray | None = None
    ) -> None:
        """
        Assign noise points to nearest clusters
        """
        if not clusters:
            clusters[0] = noise_chunks
            return

        for chunk in noise_chunks:
            best_cluster = None
            best_similarity = float('-inf')

            for cluster_id, cluster_chunks in clusters.items():
                cluster_embeddings = np.vstack([c.embedding for c in cluster_chunks])
                cluster_centroid = np.mean(cluster_embeddings, axis=0)

                similarity = np.dot(chunk.embedding, cluster_centroid)

                # Consider query relevance in assignment if available
                if query_embedding is not None:
                    query_sim = np.dot(chunk.embedding, query_embedding)
                    similarity = 0.7 * similarity + 0.3 * query_sim

                if similarity > best_similarity:
                    best_similarity = similarity
                    best_cluster = cluster_id

            if best_cluster is not None:
                clusters[best_cluster].append(chunk)

    @staticmethod
    def _generate_topic_summary(
        chunks: list[Chunk],
        query_embedding: np.ndarray,
        max_sentences=3
    ) -> str:
        """Generate a summary for a topic using most representative chunks"""
        if not chunks:
            return ""

        # Find chunks most similar to cluster centroid
        embeddings = np.vstack([chunk.embedding for chunk in chunks])
        centroid = embeddings.mean(axis=0)

        # Calculate similarities to both centroid and query
        centroid_sims = np.dot(embeddings, centroid)
        query_sims = np.dot(embeddings, query_embedding)

        # Combine both similarities
        combined_sims = 0.7 * centroid_sims + 0.3 * query_sims

        # Select top sentences from most representative chunks
        top_indices = np.argsort(combined_sims)[-max_sentences:]
        summary_chunks = [chunks[i] for i in top_indices]

        # Extract key sentences
        sentences = []
        for chunk in summary_chunks:
            sentences.extend(sent.strip() for sent in chunk.text.split('.') if sent.strip())

        return '. '.join(sentences[:max_sentences]) + '.'

    async def retrieve_with_overview(
        self,
        query: str,
        query_embedding=None,
        k: int = 5,
        min_similarity: float = 0.2,
        max_sentences: int = 5,
        cross_ref_depth: int = 2,
        max_cross_refs: int = 10  # New parameter to control cross-reference count
    ) -> RetrievalResult:
        """Enhanced retrieval with better cross-reference handling.

        Pipeline: embed the query (unless provided), run the primary
        retrieval, expand via the similarity graph, cluster the combined
        chunks, then build a per-topic overview with summaries and a
        de-duplicated, diversity-limited set of cross-references.

        Args:
            query: Natural-language query string.
            query_embedding: Optional precomputed query embedding; computed
                from ``query`` when None.
            k: Number of primary results to retrieve.
            min_similarity: Similarity floor for the primary retrieval.
            max_sentences: Sentence budget for each topic summary.
            cross_ref_depth: Hop depth for similarity-graph expansion.
            max_cross_refs: Maximum cross-references kept per topic.

        Returns:
            RetrievalResult with the topic overview, the primary chunks as
            details, and per-topic cross-reference lists.
        """
        # Get initial results with query embedding
        if query_embedding is None:
            query_embedding = (await self._get_embeddings([query]))[0]
        initial_results = await self.retrieve(query_embedding=query_embedding, k=k, min_similarity=min_similarity)

        if not initial_results:
            return RetrievalResult([], [], {})

        # Find cross-references with similarity scoring.
        # NOTE(review): list.index assumes the retrieved chunks are the same
        # objects (or compare equal to those) stored in self.vdb.chunks — confirm.
        initial_ids = {self.vdb.chunks.index(chunk) for chunk in initial_results}
        related_ids = self._find_cross_references(
            initial_ids,
            depth=cross_ref_depth,
            query_embedding=query_embedding  # Pass query embedding for relevance scoring
        )

        # Get all relevant chunks with smarter filtering
        all_chunks = self.vdb.chunks
        all_relevant_chunks = initial_results + [
            chunk for i, chunk in enumerate(all_chunks)
            if i in related_ids and self._is_relevant_cross_ref(
                chunk,
                query_embedding,
                initial_results
            )
        ]

        # Enhanced clustering with dynamic cluster size
        clusters = self._cluster_chunks(
            all_relevant_chunks,
            query_embedding=query_embedding
        )

        # Fallback: If no clusters are found, treat all relevant chunks as a single cluster.
        if not clusters:
            print("No clusters found. Falling back to using all relevant chunks as a single cluster.")
            clusters = {0: all_relevant_chunks}

        # Generate summaries and organize results
        overview = []
        cross_references = {}

        for cluster_id, cluster_chunks in clusters.items():
            summary = self._generate_topic_summary(
                cluster_chunks,
                query_embedding,
                max_sentences=max_sentences  # Increased for more context
            )

            # Enhanced chunk sorting with combined scoring
            sorted_chunks = self._sort_chunks_by_relevance(
                cluster_chunks,
                query_embedding,
                initial_results
            )

            # Separate direct matches and cross-references.
            # Dedup below uses dict equality (dicts are unhashable), so it is
            # O(n^2) by design; order of first occurrence is preserved.
            direct_matches_ = [{'text':c.text, 'metadata':c.metadata} for c in sorted_chunks if c in initial_results]
            direct_matches = []
            for match in direct_matches_:
                if match in direct_matches:
                    continue
                direct_matches.append(match)
            cross_refs_ = [c for c in sorted_chunks if c not in initial_results]
            cross_refs = []
            for match in cross_refs_:
                if match in cross_refs:
                    continue
                cross_refs.append(match)
            # Limit cross-references while maintaining diversity
            selected_cross_refs = self._select_diverse_cross_refs(
                cross_refs,
                max_cross_refs,
                query_embedding
            )

            topic_info = {
                'topic_id': cluster_id,
                'summary': summary,
                'main_chunks': [x for x in direct_matches[:3]],
                'chunk_count': len(cluster_chunks),
                'relevance_score': self._calculate_topic_relevance(
                    cluster_chunks,
                    query_embedding
                )
            }
            overview.append(topic_info)

            if selected_cross_refs:
                cross_references[f"topic_{cluster_id}"] = selected_cross_refs

        # Sort overview by relevance score
        overview.sort(key=lambda x: x['relevance_score'], reverse=True)

        return RetrievalResult(
            overview=overview,
            details=initial_results,
            cross_references=cross_references
        )

    def _find_cross_references(
        self,
        chunk_ids: set[int],
        depth: int,
        query_embedding: np.ndarray
    ) -> set[int]:
        """Breadth-first expansion over the similarity graph with relevance filtering.

        Starting from ``chunk_ids``, follows similarity-graph edges for up to
        ``depth`` hops, keeping only neighbours whose query relevance exceeds
        a fixed threshold. Returns the union of start ids and accepted ids.
        """
        visited = set(chunk_ids)
        frontier = set(chunk_ids)

        for _ in range(depth):
            if not frontier:
                break
            next_frontier = set()
            for node in frontier:
                neighbours = self.similarity_graph.get(node)
                if neighbours is None:
                    continue
                # Score unseen neighbours by relevance to the query
                for candidate in neighbours - visited:
                    relevance = self._calculate_topic_relevance(
                        [self.vdb.chunks[candidate]],
                        query_embedding
                    )
                    if relevance > 0.5:  # Adjustable threshold
                        next_frontier.add(candidate)
            visited.update(next_frontier)
            frontier = next_frontier

        return visited

    @staticmethod
    def _is_relevant_cross_ref(
        chunk: Chunk,
        query_embedding: np.ndarray,
        initial_results: list[Chunk]
    ) -> bool:
        """Determine if a cross-reference is relevant enough to include"""
        # Calculate similarity to query
        query_similarity = np.dot(chunk.embedding, query_embedding)

        # Calculate similarity to initial results
        initial_similarities = [
            np.dot(chunk.embedding, r.embedding) for r in initial_results
        ]
        max_initial_similarity = max(initial_similarities)

        # Combined relevance score
        relevance_score = 0.7 * query_similarity + 0.3 * max_initial_similarity

        return relevance_score > 0.6  # Adjustable threshold

    @staticmethod
    def _select_diverse_cross_refs(
        cross_refs: list[Chunk],
        max_count: int,
        query_embedding: np.ndarray
    ) -> list[Chunk]:
        """Select diverse and relevant cross-references"""
        if not cross_refs or len(cross_refs) <= max_count:
            return cross_refs

        # Calculate diversity scores
        embeddings = np.vstack([c.embedding for c in cross_refs])
        similarities = np.dot(embeddings, embeddings.T)

        selected = []
        remaining = list(enumerate(cross_refs))

        while len(selected) < max_count and remaining:
            # Score remaining chunks by relevance and diversity
            scores = []
            for idx, chunk in remaining:
                relevance = np.dot(chunk.embedding, query_embedding)
                diversity = 1.0
                if selected:
                    # Calculate diversity penalty based on similarity to selected chunks
                    selected_similarities = [
                        similarities[idx][list(cross_refs).index(s)]
                        for s in selected
                    ]
                    diversity = 1.0 - max(selected_similarities)

                combined_score = 0.7 * relevance + 0.3 * diversity
                scores.append((combined_score, idx, chunk))

            # Select the highest scoring chunk
            scores.sort(reverse=True)
            _, idx, chunk = scores[0]
            selected.append(chunk)
            remaining = [(i, c) for i, c in remaining if i != idx]

        return selected

    @staticmethod
    def _calculate_topic_relevance(
        chunks: list[Chunk],
        query_embedding: np.ndarray,
    ) -> float:
        """Calculate overall topic relevance score"""
        if not chunks:
            return 0.0

        similarities = [
            np.dot(chunk.embedding, query_embedding) for chunk in chunks
        ]
        return np.mean(similarities)

    @staticmethod
    def _sort_chunks_by_relevance(
        chunks: list[Chunk],
        query_embedding: np.ndarray,
        initial_results: list[Chunk]
    ) -> list[Chunk]:
        """Sort chunks by combined relevance score"""
        scored_chunks = []
        for chunk in chunks:
            query_similarity = np.dot(chunk.embedding, query_embedding)
            initial_similarities = [
                np.dot(chunk.embedding, r.embedding)
                for r in initial_results
            ]
            max_initial_similarity = max(initial_similarities) if initial_similarities else 0

            # Combined score favoring query relevance
            combined_score = 0.7 * query_similarity + 0.3 * max_initial_similarity
            scored_chunks.append((combined_score, chunk))

        scored_chunks.sort(reverse=True)
        return [chunk for _, chunk in scored_chunks]

    async def query_concepts(self, query: str) -> dict[str, Any]:
        """Query concepts extracted from the knowledge base.

        Thin async wrapper delegating to the concept extractor's own
        ``query_concepts`` implementation.

        Args:
            query: Natural-language query string.

        Returns:
            Result mapping produced by the concept extractor (schema not
            visible here — see ConceptExtractor.query_concepts).
        """
        return await self.concept_extractor.query_concepts(query)

    async def unified_retrieve(
        self,
        query: str,
        k: int = 5,
        min_similarity: float = 0.2,
        cross_ref_depth: int = 2,
        max_cross_refs: int = 10,
        max_sentences: int = 10
    ) -> dict[str, Any]:
        """
        Unified retrieval function that combines concept querying, retrieval with overview,
        and basic retrieval, then generates a comprehensive summary using LLM.

        Args:
            query: Search query string
            k: Number of primary results to retrieve
            min_similarity: Minimum similarity threshold for retrieval
            cross_ref_depth: Depth for cross-reference search
            max_cross_refs: Maximum number of cross-references per topic
            max_sentences: Maximum number of sentences in the main summary text

        Returns:
            Dictionary containing comprehensive results including summary and
            details; an empty dict when basic retrieval yields nothing usable.
        """
        # Get concept information
        concept_results = await self.concept_extractor.query_concepts(query)

        # Get retrieval overview

        query_embedding = (await self._get_embeddings([query]))[0]
        overview_results = await self.retrieve_with_overview(
            query=query,
            query_embedding=query_embedding,
            k=k,
            min_similarity=min_similarity,
            cross_ref_depth=cross_ref_depth,
            max_cross_refs=max_cross_refs,
            max_sentences=max_sentences
        )

        # Get basic retrieval results
        basic_results = await self.retrieve(
            query_embedding=query_embedding,
            k=k,
            min_similarity=min_similarity
        )
        if len(basic_results) == 0:
            return {}
        # NOTE(review): sentinel check for a degenerate "empty" string result —
        # presumably produced by some retriever backend; confirm it is still emitted.
        if len(basic_results) == 1 and isinstance(basic_results[0], str) and basic_results[0].endswith('[]\n - []\n - []'):
            return {}

        # Prepare context for LLM summary
        context = {
            "concepts": {
                "main_concepts": concept_results.get("concepts", {}),
                "relationships": concept_results.get("relationships", []),
                "concept_groups": concept_results.get("groups", [])
            },
            "topics": [
                {
                    "id": topic["topic_id"],
                    "summary": topic["summary"],
                    "relevance": topic["relevance_score"],
                    "chunk_count": topic["chunk_count"]
                }
                for topic in overview_results.overview
            ],
            "key_chunks": [
                {
                    "text": chunk.text,
                    "metadata": chunk.metadata
                }
                for chunk in basic_results
            ]
        }

        # Generate comprehensive summary using LLM
        system_prompt = """
        Analyze the provided search results and generate a comprehensive summary
        that includes:
        1. Main concepts and their relationships
        2. Key topics and their relevance
        3. Most important findings and insights
        4. Cross-references and connections between topics
        5. Potential gaps or areas for further investigation

        Format the response as a JSON object with these sections.
        """

        prompt = f"""
        Query: {query}

        Context:
        {json.dumps(context, indent=2)}

        Generate a comprehensive analysis and summary following the structure:
        """

        try:
            from toolboxv2.mods.isaa.extras.adapter import litellm_complete
            # Brief pause to stay under the provider's request-rate limit
            await asyncio.sleep(0.25)
            llm_response = await litellm_complete(
                model_name=self.model_name,
                prompt=prompt,
                system_prompt=system_prompt,
                response_format=DataModel,
            )
            summary_analysis = json.loads(llm_response)
        except Exception as e:
            # LLM failures are non-fatal: return a stub summary with the error
            get_logger().error(f"Error generating summary: {str(e)}")
            summary_analysis = {
                "main_summary": "Error generating summary",
                "error": str(e)
            }

        # Compile final results
        return {
            "summary": summary_analysis,
            "raw_results": {
                "concepts": concept_results,
                "overview": {
                    "topics": overview_results.overview,
                    "cross_references": overview_results.cross_references
                },
                "relevant_chunks": [
                    {
                        "text": chunk.text,
                        "metadata": chunk.metadata,
                        "cluster_id": chunk.cluster_id
                    }
                    for chunk in basic_results
                ]
            },
            "metadata": {
                "query": query,
                "timestamp": time.time(),
                "retrieval_params": {
                    "k": k,
                    "min_similarity": min_similarity,
                    "cross_ref_depth": cross_ref_depth,
                    "max_cross_refs": max_cross_refs
                }
            }
        }

    def save(self, path: str | None) -> bytes | None:
        """
        Save the complete knowledge base to disk, including all sub-components.

        Args:
            path (str | None): Path where the knowledge base will be saved.
                When None, the pickled payload is returned instead of written.

        Returns:
            The pickled knowledge base as bytes when ``path`` is None,
            otherwise None after an atomic write to ``path``.

        Raises:
            Exception: Re-raises any serialisation or write error after
                printing a short diagnostic.
        """
        try:
            data = {
                # Core components
                'vdb': self.vdb.save(),
                'vis_kwargs': self.vis_kwargs,
                'vis_class': self.vis_class,
                'existing_hashes': self.existing_hashes,

                # Configuration parameters
                'embedding_dim': self.embedding_dim,
                'similarity_threshold': self.similarity_threshold,
                'batch_size': self.batch_size,
                'n_clusters': self.n_clusters,
                'deduplication_threshold': self.deduplication_threshold,
                'model_name': self.model_name,
                'embedding_model': self.embedding_model,

                # Cache and graph data
                'similarity_graph': self.similarity_graph,
                'sto': self.sto,

                # Text splitter configuration
                'text_splitter_config': {
                    'chunk_size': self.text_splitter.chunk_size,
                    'chunk_overlap': self.text_splitter.chunk_overlap,
                    'separator': self.text_splitter.separator
                },

                # Concept extractor data (relationship sets become lists for pickling)
                'concept_graph': {
                    'concepts': {
                        name: {
                            'name': concept.name,
                            'category': concept.category,
                            'relationships': {k: list(v) for k, v in concept.relationships.items()},
                            'importance_score': concept.importance_score,
                            'context_snippets': concept.context_snippets,
                            'metadata': concept.metadata
                        }
                        for name, concept in self.concept_extractor.concept_graph.concepts.items()
                    }
                }
            }
            b = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)

            if path is None:
                return b

            path = Path(path)
            tmp = path.with_suffix(path.suffix + ".tmp") if path.suffix else path.with_name(path.name + ".tmp")

            try:
                # Write to a temporary file first
                with open(tmp, "wb") as f:
                    f.write(b)
                    f.flush()
                    os.fsync(f.fileno())  # make sure the data hits the disk
                # Atomic swap into place
                os.replace(tmp, path)
            finally:
                # Clean up if tmp still exists (i.e. an error occurred)
                if tmp.exists():
                    with contextlib.suppress(Exception):
                        tmp.unlink()
            return None

        except Exception as e:
            print(f"Error saving knowledge base: {str(e)}")
            raise
    def init_vdb(self, db: AbstractVectorStore = AbstractVectorStore):
        """Placeholder hook for (re)initialising the vector store backend.

        NOTE(review): currently a no-op, and the default is the
        AbstractVectorStore *class* itself rather than an instance —
        confirm the intended contract before relying on it.
        """
        pass
    @classmethod
    def load(cls, path: str | bytes) -> 'KnowledgeBase':
        """
        Load a complete knowledge base from disk, including all sub-components.

        Args:
            path (str | bytes): Filesystem path to load from, or the raw
                pickled payload as returned by :meth:`save` with ``path=None``.

        Returns:
            KnowledgeBase: A fully restored knowledge base instance

        Raises:
            FileNotFoundError: When a path is given and does not exist.
            EOFError: When the file or byte payload is empty or truncated.
            ValueError: When the payload cannot be unpickled.
        """
        try:
            if isinstance(path, bytes | bytearray | memoryview):
                data_bytes = bytes(path)
                try:
                    data = pickle.loads(data_bytes)
                except Exception as e:
                    raise EOFError(f"Fehler beim pickle.loads von bytes: {e}") from e
            else:
                p = Path(path)
                if not p.exists():
                    raise FileNotFoundError(f"{p} existiert nicht")
                size = p.stat().st_size
                if size == 0:
                    raise EOFError(f"{p} ist leer (0 bytes)")
                try:
                    with open(p, "rb") as f:
                        try:
                            data = pickle.load(f)
                        except EOFError as e:
                            # Debug info: include the first bytes of the file
                            f.seek(0)
                            snippet = f.read(128)
                            raise EOFError(
                                f"EOFError beim Laden {p} (Größe {size} bytes). Erste 128 bytes: {snippet!r}") from e

                except Exception as e:
                    raise ValueError(f"Invalid path type {e}") from e

            # Create new knowledge base instance with saved configuration
            kb = cls(
                embedding_dim=data['embedding_dim'],
                similarity_threshold=data['similarity_threshold'],
                batch_size=data['batch_size'],
                n_clusters=data['n_clusters'],
                deduplication_threshold=data['deduplication_threshold'],
                model_name=data['model_name'],
                embedding_model=data['embedding_model']
            )

            # Restore core components
            kb.init_vis(data.get('vis_class'), data.get('vis_kwargs'))
            kb.existing_hashes = data['existing_hashes']

            # Restore cache and graph data
            kb.similarity_graph = data.get('similarity_graph', {})
            kb.sto = data.get('sto', [])

            # Restore text splitter configuration
            splitter_config = data.get('text_splitter_config', {})
            kb.text_splitter = TextSplitter(
                chunk_size=splitter_config.get('chunk_size', 12_000),
                chunk_overlap=splitter_config.get('chunk_overlap', 200),
                separator=splitter_config.get('separator', '\n')
            )

            # Restore concept graph (relationship lists become sets again)
            concept_data = data.get('concept_graph', {}).get('concepts', {})
            for concept_info in concept_data.values():
                concept = Concept(
                    name=concept_info['name'],
                    category=concept_info['category'],
                    relationships={k: set(v) for k, v in concept_info['relationships'].items()},
                    importance_score=concept_info['importance_score'],
                    context_snippets=concept_info['context_snippets'],
                    metadata=concept_info['metadata']
                )
                kb.concept_extractor.concept_graph.add_concept(concept)

            return kb

        except Exception:
            # Propagate unchanged; callers decide how to report load failures.
            raise

    def vis(self, output_file: str = "concept_graph.html", get_output_html=False, get_output_net=False):
        """Visualise the concept graph.

        Returns None (with a notice) when no concepts exist, the networkx
        graph when ``get_output_net`` is set, otherwise the result of
        GraphVisualizer rendering to ``output_file``.
        """
        graph = self.concept_extractor.concept_graph
        if not graph.concepts:
            print("NO Concepts defined")
            return None
        network = graph.convert_to_networkx()
        if get_output_net:
            return network
        return GraphVisualizer.visualize(network, output_file=output_file, get_output=get_output_html)
__init__(embedding_dim=256, similarity_threshold=0.61, batch_size=64, n_clusters=4, deduplication_threshold=0.85, model_name=os.getenv('SUMMARYMODEL'), embedding_model=os.getenv('DEFAULTMODELEMBEDDING'), vis_class='FaissVectorStore', vis_kwargs=None, requests_per_second=85.0, chunk_size=3600, chunk_overlap=130, separator='\n')

Initialize the knowledge base with given parameters

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
def __init__(self, embedding_dim: int = 256, similarity_threshold: float = 0.61, batch_size: int = 64,
             n_clusters: int = 4, deduplication_threshold: float = 0.85, model_name=os.getenv("SUMMARYMODEL"),
             embedding_model=os.getenv("DEFAULTMODELEMBEDDING"),
             vis_class:str | None = "FaissVectorStore",
             vis_kwargs:dict[str, Any] | None=None,
             requests_per_second=85.,
             chunk_size: int = 3600,
             chunk_overlap: int = 130,
             separator: str = "\n"
             ):
    """Initialize the knowledge base with given parameters"""

    self.existing_hashes: set[str] = set()
    self.embedding_model = embedding_model
    self.embedding_dim = embedding_dim
    self.similarity_threshold = similarity_threshold
    self.deduplication_threshold = deduplication_threshold
    if model_name == "openrouter/mistralai/mistral-nemo":
        batch_size = 9
        requests_per_second = 1.5
    self.batch_size = batch_size
    self.n_clusters = n_clusters
    self.model_name = model_name
    self.sto: list = []

    self.text_splitter = TextSplitter(chunk_size=chunk_size,chunk_overlap=chunk_overlap, separator=separator)
    self.similarity_graph = {}
    self.concept_extractor = ConceptExtractor(self, requests_per_second)

    self.vis_class = None
    self.vis_kwargs = None
    self.vdb = None
    self.init_vis(vis_class, vis_kwargs)
add_data(texts, metadata=None, direct=False) async

Enhanced version with smart splitting and clustering

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
async def add_data(
    self,
    texts: list[str],
    metadata: list[dict[str, Any]] | None = None, direct:bool = False
) -> tuple[int, int]:
    """Enhanced version with smart splitting and clustering"""
    if isinstance(texts, str):
        texts = [texts]
    if metadata is None:
        metadata = [{}] * len(texts)
    if isinstance(metadata, dict):
        metadata = [metadata]
    if len(texts) != len(metadata):
        raise ValueError("Length of texts and metadata must match")

    if not direct and len(texts) == 1 and len(texts[0]) < 10_000:
        if len(self.sto) < self.batch_size and len(texts) == 1:
            self.sto.append((texts[0], metadata[0]))
            return -1, -1
        if len(self.sto) >= self.batch_size:
            _ = [texts.append(t) or metadata.append([m]) for (t, m) in self.sto]
            self.sto = []

    # Split large texts
    split_texts = []
    split_metadata = []

    while Spinner("Saving Data to Memory", symbols='t'):

        for idx, text in enumerate(texts):
            chunks = self.text_splitter.split_text(text)
            split_texts.extend(chunks)

            # Adjust metadata for splits
            meta = metadata[idx] if metadata else {}
            if isinstance(meta, list):
                meta = meta[0]
            for i, _chunk in enumerate(chunks):
                chunk_meta = meta.copy()
                chunk_meta.update({
                    'chunk_index': i,
                    'total_chunks': len(chunks),
                    'original_text_id': idx
                })
                split_metadata.append(chunk_meta)

        return await self._add_data(split_texts, split_metadata)
compute_hash(text) staticmethod

Compute SHA-256 hash of text

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
681
682
683
684
@staticmethod
def compute_hash(text: str) -> str:
    """Compute SHA-256 hash of text"""
    return hashlib.sha256(text.encode('utf-8', errors='ignore')).hexdigest()
forget_irrelevant(irrelevant_concepts, similarity_threshold=None) async

Remove chunks similar to irrelevant concepts. Returns: the number of chunks removed

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
async def forget_irrelevant(self, irrelevant_concepts: list[str], similarity_threshold: float | None=None) -> int:
    """
    Remove chunks whose embeddings are similar to any of the given
    irrelevant concepts, pruning the dropped chunks' concepts from the
    concept graph as a side effect.

    Args:
        irrelevant_concepts: Concept phrases describing content to forget.
        similarity_threshold: Dot-product similarity cutoff; chunks at or
            above it against any concept are removed. Defaults to
            ``self.similarity_threshold``.

    Returns:
        int: Number of chunks removed.
    """
    if not irrelevant_concepts:
        return 0

    if similarity_threshold is None:
        # Fall back to the instance-wide threshold.
        similarity_threshold = self.similarity_threshold

    try:
        irrelevant_embeddings = await self._get_embeddings(irrelevant_concepts)
        initial_count = len(self.vdb.chunks)

        def is_relevant(chunk: Chunk) -> bool:
            # Similarity of this chunk against every irrelevant concept.
            similarities = np.dot(chunk.embedding, irrelevant_embeddings.T)
            do_keep = np.max(similarities) < similarity_threshold
            if do_keep:
                return True
            # Side effect: the chunk will be dropped, so also delete its
            # concepts from the concept graph to keep both stores in sync.
            for c in chunk.metadata.get("concepts", []):
                if c in self.concept_extractor.concept_graph.concepts:
                    del self.concept_extractor.concept_graph.concepts[c]
            return False

        # NOTE: this filter deliberately carries the concept-graph side
        # effect above; the iteration order therefore matters.
        relevant_chunks = [chunk for chunk in self.vdb.chunks if is_relevant(chunk)]
        self.vdb.chunks = relevant_chunks
        # Hash set must mirror the surviving chunks so deduplication
        # does not reject re-added content.
        self.existing_hashes = {chunk.content_hash for chunk in self.vdb.chunks}
        self.vdb.rebuild_index()

        return initial_count - len(self.vdb.chunks)

    except Exception as e:
        get_logger().error(f"Error forgetting irrelevant concepts: {str(e)}")
        raise
load(path) classmethod

Load a complete knowledge base from disk, including all sub-components

Parameters:

Name Type Description Default
path str

Path from where to load the knowledge base

required

Returns:

Name Type Description
KnowledgeBase KnowledgeBase

A fully restored knowledge base instance

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
@classmethod
def load(cls, path: str | bytes) -> 'KnowledgeBase':
    """
    Load a complete knowledge base from disk, including all sub-components

    Args:
        path (str | bytes): Filesystem path to a saved knowledge base, or
            the raw pickled bytes produced by :meth:`save`.

    Returns:
        KnowledgeBase: A fully restored knowledge base instance

    Raises:
        FileNotFoundError: If a path is given and does not exist.
        EOFError: If the file/bytes are empty or truncated.

    SECURITY NOTE: this uses ``pickle.loads`` — never call it on
    untrusted data, as unpickling can execute arbitrary code.
    """
    try:
        if isinstance(path, bytes | bytearray | memoryview):
            # In-memory payload: deserialize directly.
            data_bytes = bytes(path)
            try:
                data = pickle.loads(data_bytes)
            except Exception as e:
                raise EOFError(f"Fehler beim pickle.loads von bytes: {e}") from e
        else:
            p = Path(path)
            if not p.exists():
                raise FileNotFoundError(f"{p} existiert nicht")
            size = p.stat().st_size
            if size == 0:
                # Fail early with a clear message instead of a bare pickle error.
                raise EOFError(f"{p} ist leer (0 bytes)")
            try:
                with open(p, "rb") as f:
                    try:
                        data = pickle.load(f)
                    except EOFError as e:
                        # Debug aid: include the first bytes of the file in the error.
                        f.seek(0)
                        snippet = f.read(128)
                        raise EOFError(
                            f"EOFError beim Laden {p} (Größe {size} bytes). Erste 128 bytes: {snippet!r}") from e

            except Exception as e:
                raise ValueError(f"Invalid path type {e}") from e

        # Create new knowledge base instance with saved configuration
        kb = cls(
            embedding_dim=data['embedding_dim'],
            similarity_threshold=data['similarity_threshold'],
            batch_size=data['batch_size'],
            n_clusters=data['n_clusters'],
            deduplication_threshold=data['deduplication_threshold'],
            model_name=data['model_name'],
            embedding_model=data['embedding_model']
        )

        # Restore core components
        kb.init_vis(data.get('vis_class'), data.get('vis_kwargs'))
        kb.existing_hashes = data['existing_hashes']

        # Restore cache and graph data
        kb.similarity_graph = data.get('similarity_graph', {})
        kb.sto = data.get('sto', [])

        # Restore text splitter configuration
        splitter_config = data.get('text_splitter_config', {})
        kb.text_splitter = TextSplitter(
            chunk_size=splitter_config.get('chunk_size', 12_000),
            chunk_overlap=splitter_config.get('chunk_overlap', 200),
            separator=splitter_config.get('separator', '\n')
        )

        # Restore concept graph (relationships were saved as lists; convert back to sets)
        concept_data = data.get('concept_graph', {}).get('concepts', {})
        for concept_info in concept_data.values():
            concept = Concept(
                name=concept_info['name'],
                category=concept_info['category'],
                relationships={k: set(v) for k, v in concept_info['relationships'].items()},
                importance_score=concept_info['importance_score'],
                context_snippets=concept_info['context_snippets'],
                metadata=concept_info['metadata']
            )
            kb.concept_extractor.concept_graph.add_concept(concept)

        # print(f"Knowledge base successfully loaded from {path} with {len(concept_data)} concepts")
        return kb

    except Exception:
        #print(f"Error loading knowledge base: {str(e)}")
        #import traceback
        #traceback.print_exception(e)
        raise
query_concepts(query) async

Query concepts extracted from the knowledge base

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
1488
1489
1490
async def query_concepts(self, query: str) -> dict[str, any]:
    """Delegate a concept query to the underlying concept extractor."""
    extractor = self.concept_extractor
    return await extractor.query_concepts(query)
retrieve(query='', query_embedding=None, k=5, min_similarity=0.2, include_connected=True) async

Enhanced retrieval with connected information

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
async def retrieve(
    self,
    query: str="",
    query_embedding: np.ndarray | None = None,
    k: int = 5,
    min_similarity: float = 0.2,
    include_connected: bool = True
) -> list[Chunk]:
    """Retrieve the chunks most similar to the query, optionally expanded
    with chunks linked through the similarity graph."""
    if query_embedding is None:
        query_embedding = (await self._get_embeddings([query]))[0]

    k = min(k, len(self.vdb.chunks))
    if k <= 0:
        return []

    primary = self.vdb.search(query_embedding, k, min_similarity)
    if not include_connected or not primary:
        return primary

    # Collect ids of chunks linked to any primary hit.
    linked_ids = set()
    for hit in primary:
        hit_id = self.vdb.chunks.index(hit)
        linked_ids.update(self.similarity_graph.get(hit_id, ()))

    # Materialize the linked chunks that are not already primary hits.
    store = self.vdb.chunks
    extras = [store[i] for i in linked_ids if store[i] not in primary]

    # Rank everything by similarity to the query; allow up to twice ``k``
    # results because connected chunks were merged in.
    ranked = sorted(
        primary + extras,
        key=lambda chunk: np.dot(chunk.embedding, query_embedding),
        reverse=True
    )
    return ranked[:k * 2]
retrieve_with_overview(query, query_embedding=None, k=5, min_similarity=0.2, max_sentences=5, cross_ref_depth=2, max_cross_refs=10) async

Enhanced retrieval with better cross-reference handling

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
async def retrieve_with_overview(
    self,
    query: str,
    query_embedding=None,
    k: int = 5,
    min_similarity: float = 0.2,
    max_sentences: int = 5,
    cross_ref_depth: int = 2,
    max_cross_refs: int = 10  # New parameter to control cross-reference count
) -> RetrievalResult:
    """Enhanced retrieval with better cross-reference handling.

    Retrieves the top matches, expands them with cross-referenced chunks,
    clusters the combined set into topics, and returns per-topic summaries
    alongside the raw matches.

    Args:
        query: Search query string.
        query_embedding: Optional precomputed embedding for ``query``;
            computed on demand when omitted.
        k: Number of primary results to retrieve.
        min_similarity: Minimum similarity threshold for retrieval.
        max_sentences: Maximum sentences per topic summary.
        cross_ref_depth: Traversal depth for cross-reference discovery.
        max_cross_refs: Maximum cross-references kept per topic.

    Returns:
        RetrievalResult: Topic overview, direct matches, and
        cross-references grouped by topic.
    """
    # Get initial results with query embedding
    if query_embedding is None:
        query_embedding = (await self._get_embeddings([query]))[0]
    initial_results = await self.retrieve(query_embedding=query_embedding, k=k, min_similarity=min_similarity)

    if not initial_results:
        return RetrievalResult([], [], {})

    # Find cross-references with similarity scoring
    initial_ids = {self.vdb.chunks.index(chunk) for chunk in initial_results}
    related_ids = self._find_cross_references(
        initial_ids,
        depth=cross_ref_depth,
        query_embedding=query_embedding  # Pass query embedding for relevance scoring
    )

    # Get all relevant chunks with smarter filtering
    all_chunks = self.vdb.chunks
    all_relevant_chunks = initial_results + [
        chunk for i, chunk in enumerate(all_chunks)
        if i in related_ids and self._is_relevant_cross_ref(
            chunk,
            query_embedding,
            initial_results
        )
    ]

    # Enhanced clustering with dynamic cluster size
    clusters = self._cluster_chunks(
        all_relevant_chunks,
        query_embedding=query_embedding
    )

    # Fallback: If no clusters are found, treat all relevant chunks as a single cluster.
    if not clusters:
        print("No clusters found. Falling back to using all relevant chunks as a single cluster.")
        clusters = {0: all_relevant_chunks}

    # Generate summaries and organize results
    overview = []
    cross_references = {}

    for cluster_id, cluster_chunks in clusters.items():
        summary = self._generate_topic_summary(
            cluster_chunks,
            query_embedding,
            max_sentences=max_sentences  # Increased for more context
        )

        # Enhanced chunk sorting with combined scoring
        sorted_chunks = self._sort_chunks_by_relevance(
            cluster_chunks,
            query_embedding,
            initial_results
        )

        # Separate direct matches and cross-references
        direct_matches_ = [{'text':c.text, 'metadata':c.metadata} for c in sorted_chunks if c in initial_results]
        # Deduplicate while preserving order (dicts are unhashable, so no set).
        direct_matches = []
        for match in direct_matches_:
            if match in direct_matches:
                continue
            direct_matches.append(match)
        cross_refs_ = [c for c in sorted_chunks if c not in initial_results]
        # Same order-preserving deduplication for cross-references.
        cross_refs = []
        for match in cross_refs_:
            if match in cross_refs:
                continue
            cross_refs.append(match)
        # Limit cross-references while maintaining diversity
        selected_cross_refs = self._select_diverse_cross_refs(
            cross_refs,
            max_cross_refs,
            query_embedding
        )

        topic_info = {
            'topic_id': cluster_id,
            'summary': summary,
            'main_chunks': [x for x in direct_matches[:3]],
            'chunk_count': len(cluster_chunks),
            'relevance_score': self._calculate_topic_relevance(
                cluster_chunks,
                query_embedding
            )
        }
        overview.append(topic_info)

        if selected_cross_refs:
            cross_references[f"topic_{cluster_id}"] = selected_cross_refs

    # Sort overview by relevance score
    overview.sort(key=lambda x: x['relevance_score'], reverse=True)

    return RetrievalResult(
        overview=overview,
        details=initial_results,
        cross_references=cross_references
    )
save(path)

Save the complete knowledge base to disk, including all sub-components

Parameters:

Name Type Description Default
path str

Path where the knowledge base will be saved

required
Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
def save(self, path: str | None) -> bytes | None:
    """
    Save the complete knowledge base to disk, including all sub-components

    Args:
        path (str | None): Path where the knowledge base will be saved.
            When ``None``, nothing is written and the pickled bytes are
            returned instead.

    Returns:
        bytes | None: The pickled payload when ``path`` is ``None``,
        otherwise ``None`` after an atomic write to ``path``.
    """
    try:
        data = {
            # Core components
            'vdb': self.vdb.save(),
            'vis_kwargs': self.vis_kwargs,
            'vis_class': self.vis_class,
            'existing_hashes': self.existing_hashes,

            # Configuration parameters
            'embedding_dim': self.embedding_dim,
            'similarity_threshold': self.similarity_threshold,
            'batch_size': self.batch_size,
            'n_clusters': self.n_clusters,
            'deduplication_threshold': self.deduplication_threshold,
            'model_name': self.model_name,
            'embedding_model': self.embedding_model,

            # Cache and graph data
            'similarity_graph': self.similarity_graph,
            'sto': self.sto,

            # Text splitter configuration
            'text_splitter_config': {
                'chunk_size': self.text_splitter.chunk_size,
                'chunk_overlap': self.text_splitter.chunk_overlap,
                'separator': self.text_splitter.separator
            },

            # Concept extractor data (sets converted to lists for pickling stability)
            'concept_graph': {
                'concepts': {
                    name: {
                        'name': concept.name,
                        'category': concept.category,
                        'relationships': {k: list(v) for k, v in concept.relationships.items()},
                        'importance_score': concept.importance_score,
                        'context_snippets': concept.context_snippets,
                        'metadata': concept.metadata
                    }
                    for name, concept in self.concept_extractor.concept_graph.concepts.items()
                }
            }
        }
        b = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)

        if path is None:
            # In-memory mode: hand the serialized payload back to the caller.
            return b

        path = Path(path)
        tmp = path.with_suffix(path.suffix + ".tmp") if path.suffix else path.with_name(path.name + ".tmp")

        try:
            # Write to a temporary file first
            with open(tmp, "wb") as f:
                f.write(b)
                f.flush()
                os.fsync(f.fileno())  # make sure the data reaches the disk
            # Atomic replace of the destination
            os.replace(tmp, path)
        finally:
            # Clean up if tmp still exists (i.e. an error occurred before replace)
            if tmp.exists():
                with contextlib.suppress(Exception):
                    tmp.unlink()
        return None
        # print(f"Knowledge base successfully saved to {path} with {len(self.concept_extractor.concept_graph.concepts.items())} concepts")

    except Exception as e:
        print(f"Error saving knowledge base: {str(e)}")
        raise
unified_retrieve(query, k=5, min_similarity=0.2, cross_ref_depth=2, max_cross_refs=10, max_sentences=10) async

Unified retrieval function that combines concept querying, retrieval with overview, and basic retrieval, then generates a comprehensive summary using an LLM.

Parameters:

Name Type Description Default
query str

Search query string

required
k int

Number of primary results to retrieve

5
min_similarity float

Minimum similarity threshold for retrieval

0.2
cross_ref_depth int

Depth for cross-reference search

2
max_cross_refs int

Maximum number of cross-references per topic

10
max_sentences int

Maximum number of sentences in the main summary text

10

Returns:

Type Description
dict[str, Any]

Dictionary containing comprehensive results including summary and details

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
async def unified_retrieve(
    self,
    query: str,
    k: int = 5,
    min_similarity: float = 0.2,
    cross_ref_depth: int = 2,
    max_cross_refs: int = 10,
    max_sentences: int = 10
) -> dict[str, Any]:
    """
    Unified retrieval function that combines concept querying, retrieval with overview,
    and basic retrieval, then generates a comprehensive summary using an LLM.

    Args:
        query: Search query string
        k: Number of primary results to retrieve
        min_similarity: Minimum similarity threshold for retrieval
        cross_ref_depth: Depth for cross-reference search
        max_cross_refs: Maximum number of cross-references per topic
        max_sentences: Maximum number of sentences in the main summary text

    Returns:
        Dictionary containing comprehensive results including summary and details.
        Empty dict when nothing relevant was retrieved.
    """
    # Get concept information
    concept_results = await self.concept_extractor.query_concepts(query)

    # Get retrieval overview

    # Embed the query once and reuse it for both retrieval passes.
    query_embedding = (await self._get_embeddings([query]))[0]
    overview_results = await self.retrieve_with_overview(
        query=query,
        query_embedding=query_embedding,
        k=k,
        min_similarity=min_similarity,
        cross_ref_depth=cross_ref_depth,
        max_cross_refs=max_cross_refs,
        max_sentences=max_sentences
    )

    # Get basic retrieval results
    basic_results = await self.retrieve(
        query_embedding=query_embedding,
        k=k,
        min_similarity=min_similarity
    )
    if len(basic_results) == 0:
        return {}
    # NOTE(review): this guards against a sentinel "empty" string result;
    # unclear which producer emits it — confirm before removing.
    if len(basic_results) == 1 and isinstance(basic_results[0], str) and basic_results[0].endswith('[]\n - []\n - []'):
        return {}

    # Prepare context for LLM summary
    context = {
        "concepts": {
            "main_concepts": concept_results.get("concepts", {}),
            "relationships": concept_results.get("relationships", []),
            "concept_groups": concept_results.get("groups", [])
        },
        "topics": [
            {
                "id": topic["topic_id"],
                "summary": topic["summary"],
                "relevance": topic["relevance_score"],
                "chunk_count": topic["chunk_count"]
            }
            for topic in overview_results.overview
        ],
        "key_chunks": [
            {
                "text": chunk.text,
                "metadata": chunk.metadata
            }
            for chunk in basic_results
        ]
    }

    # Generate comprehensive summary using LLM
    system_prompt = """
    Analyze the provided search results and generate a comprehensive summary
    that includes:
    1. Main concepts and their relationships
    2. Key topics and their relevance
    3. Most important findings and insights
    4. Cross-references and connections between topics
    5. Potential gaps or areas for further investigation

    Format the response as a JSON object with these sections.
    """

    prompt = f"""
    Query: {query}

    Context:
    {json.dumps(context, indent=2)}

    Generate a comprehensive analysis and summary following the structure:
    """

    try:
        from toolboxv2.mods.isaa.extras.adapter import litellm_complete
        # Small delay to avoid hammering the LLM endpoint.
        await asyncio.sleep(0.25)
        llm_response = await litellm_complete(
            model_name=self.model_name,
            prompt=prompt,
            system_prompt=system_prompt,
            response_format=DataModel,
        )
        summary_analysis = json.loads(llm_response)
    except Exception as e:
        # Best-effort: retrieval results are still returned with an error summary.
        get_logger().error(f"Error generating summary: {str(e)}")
        summary_analysis = {
            "main_summary": "Error generating summary",
            "error": str(e)
        }

    # Compile final results
    return {
        "summary": summary_analysis,
        "raw_results": {
            "concepts": concept_results,
            "overview": {
                "topics": overview_results.overview,
                "cross_references": overview_results.cross_references
            },
            "relevant_chunks": [
                {
                    "text": chunk.text,
                    "metadata": chunk.metadata,
                    "cluster_id": chunk.cluster_id
                }
                for chunk in basic_results
            ]
        },
        "metadata": {
            "query": query,
            "timestamp": time.time(),
            "retrieval_params": {
                "k": k,
                "min_similarity": min_similarity,
                "cross_ref_depth": cross_ref_depth,
                "max_cross_refs": max_cross_refs
            }
        }
    }
RelevanceAssessment

Bases: BaseModel

Represents an assessment of the relevance of the data in relation to a specific query.

Attributes:

Name Type Description
query_alignment float

A float representing the alignment between the query and the data.

confidence_score float

A float indicating the confidence level in the alignment.

coverage_analysis str

A textual description analyzing the data coverage.

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
139
140
141
142
143
144
145
146
147
148
149
150
class RelevanceAssessment(BaseModel):
    """Assessment of how well retrieved data matches a given query.

    Attributes:
        query_alignment: Degree of alignment between the query and the data.
        confidence_score: Confidence in the computed alignment.
        coverage_analysis: Free-text analysis of the data coverage.
    """
    query_alignment: float
    confidence_score: float
    coverage_analysis: str
RetrievalResult dataclass

Structure for organizing retrieval results

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
37
38
39
40
41
42
@dataclass
class RetrievalResult:
    """Container bundling the pieces of a retrieval answer."""
    overview: list[dict[str, any]]  # per-topic summary dicts
    details: list[Chunk]  # the matched chunks themselves
    cross_references: dict[str, list[Chunk]]  # related chunks keyed by topic
TConcept

Bases: BaseModel

Represents the criteria or target parameters for concept selection and filtering.

Attributes:

Name Type Description
min_importance float

The minimum importance score a concept must have to be considered.

target_concepts List[str]

A list of names of target concepts to focus on.

relationship_types List[str]

A list of relationship types to be considered in the analysis.

categories List[str]

A list of concept categories to filter or group the concepts.

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
86
87
88
89
90
91
92
93
94
95
96
97
98
99
class TConcept(BaseModel):
    """Target parameters for concept selection and filtering.

    Attributes:
        min_importance: Minimum importance score a concept needs to qualify.
        target_concepts: Names of the concepts to focus on.
        relationship_types: Relationship types to include in the analysis.
        categories: Concept categories used to filter or group concepts.
    """
    min_importance: float
    target_concepts: list[str]
    relationship_types: list[str]
    categories: list[str]
TextSplitter
Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
class TextSplitter:
    """Split long texts into overlapping chunks, preferring natural break points."""

    def __init__(
        self,
        chunk_size: int = 3600,
        chunk_overlap: int = 130,
        separator: str = "\n"
    ):
        # Maximum characters per chunk.
        self.chunk_size = chunk_size
        # Characters shared between consecutive chunks.
        self.chunk_overlap = chunk_overlap
        # Preferred break point searched for near the end of each chunk.
        self.separator = separator

    def approximate(self, text_len: int) -> tuple[float, float]:
        """
        Approximate the number of chunks and average chunk size for a given text length

        Args:
            text_len (int): Length of the text to be split

        Returns:
            Tuple[float, float]: (number_of_chunks, approximate_chunk_size)
        """
        if text_len <= self.chunk_size:
            return 1, text_len

        # Degenerate configuration: overlap at least as large as the chunk.
        if self.chunk_overlap >= self.chunk_size:
            estimated_chunks = text_len
            return estimated_chunks, 1

        # Calculate based on overlap ratio
        overlap_ratio = self.chunk_overlap / self.chunk_size
        base_chunks = text_len / self.chunk_size
        estimated_chunks = base_chunks * 2 / (overlap_ratio if overlap_ratio > 0 else 1)

        # Calculate average chunk size
        avg_chunk_size = max(1, text_len / estimated_chunks)

        # BUGFIX: the original returned the scalar product
        # ``estimated_chunks * avg_chunk_size`` here, while the docstring and
        # both other branches promise a (count, size) tuple.
        return estimated_chunks, avg_chunk_size

    def split_text(self, text: str) -> list[str]:
        """Split text into chunks with overlap"""
        # Clean and normalize text.
        # NOTE(review): this collapses newlines too, so the default
        # separator "\n" can never match afterwards — confirm intent.
        text = re.sub(r'\s+', ' ', text).strip()

        # If text is shorter than chunk_size, return as is
        if len(text) <= self.chunk_size:
            return [text]

        chunks = []
        start = 0

        while start < len(text):
            # Find end of chunk
            end = start + self.chunk_size

            if end >= len(text):
                chunks.append(text[start:])
                break

            # Try to find a natural break point.
            # BUGFIX: require the separator to lie strictly after ``start``;
            # the original accepted ``last_separator == start`` which emitted
            # an empty chunk.
            last_separator = text.rfind(self.separator, start, end)
            if last_separator > start:
                end = last_separator

            # Add chunk
            chunks.append(text[start:end])

            # Calculate allowed overlap for this chunk
            chunk_length = end - start
            allowed_overlap = min(self.chunk_overlap, chunk_length - 1)

            # Move start position considering adjusted overlap
            start = end - allowed_overlap

        return chunks
approximate(text_len)

Approximate the number of chunks and average chunk size for a given text length

Parameters:

Name Type Description Default
text_len int

Length of the text to be split

required

Returns:

Type Description
float

Tuple[int, int]: (number_of_chunks, approximate_chunk_size)

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
def approximate(self, text_len: int) -> tuple[float, float]:
    """
    Approximate the number of chunks and average chunk size for a given text length

    Args:
        text_len (int): Length of the text to be split

    Returns:
        Tuple[float, float]: (number_of_chunks, approximate_chunk_size)
    """
    if text_len <= self.chunk_size:
        return 1, text_len

    # Degenerate configuration: overlap at least as large as the chunk.
    if self.chunk_overlap >= self.chunk_size:
        estimated_chunks = text_len
        return estimated_chunks, 1

    # Calculate based on overlap ratio
    overlap_ratio = self.chunk_overlap / self.chunk_size
    base_chunks = text_len / self.chunk_size
    estimated_chunks = base_chunks * 2 / (overlap_ratio if overlap_ratio > 0 else 1)

    # Calculate average chunk size
    avg_chunk_size = max(1, text_len / estimated_chunks)

    # BUGFIX: the original returned the scalar product
    # ``estimated_chunks * avg_chunk_size`` here, while the docstring and
    # both other branches promise a (count, size) tuple.
    return estimated_chunks, avg_chunk_size
split_text(text)

Split text into chunks with overlap

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
def split_text(self, text: str) -> list[str]:
    """Split text into chunks with overlap"""
    # Clean and normalize text.
    # NOTE(review): this collapses newlines too, so the default
    # separator "\n" can never match afterwards — confirm intent.
    text = re.sub(r'\s+', ' ', text).strip()

    # If text is shorter than chunk_size, return as is
    if len(text) <= self.chunk_size:
        return [text]

    chunks = []
    start = 0

    while start < len(text):
        # Find end of chunk
        end = start + self.chunk_size

        if end >= len(text):
            chunks.append(text[start:])
            break

        # Try to find a natural break point.
        # BUGFIX: require the separator to lie strictly after ``start``;
        # the original accepted ``last_separator == start`` which emitted
        # an empty chunk.
        last_separator = text.rfind(self.separator, start, end)
        if last_separator > start:
            end = last_separator

        # Add chunk
        chunks.append(text[start:end])

        # Calculate allowed overlap for this chunk
        chunk_length = end - start
        allowed_overlap = min(self.chunk_overlap, chunk_length - 1)

        # Move start position considering adjusted overlap
        start = end - allowed_overlap

    return chunks
TopicInsights

Bases: BaseModel

Represents insights related to various topics.

Attributes:

Name Type Description
primary_topics list[str]

A list of main topics addressed.

cross_references list[str]

A list of cross-references that connect different topics.

knowledge_gaps list[str]

A list of identified gaps in the current knowledge.

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
125
126
127
128
129
130
131
132
133
134
135
136
class TopicInsights(BaseModel):
    """Insights extracted across topics.

    Attributes:
        primary_topics: The main topics addressed.
        cross_references: Cross-references connecting different topics.
        knowledge_gaps: Identified gaps in the current knowledge.
    """
    primary_topics: list[str]
    cross_references: list[str]
    knowledge_gaps: list[str]
rConcept

Bases: BaseModel

Represents a key concept with its relationships and associated metadata.

Attributes:

Name Type Description
name str

The name of the concept.

category str

The category of the concept (e.g., 'technical', 'domain', 'method', etc.).

relationships Dict[str, List[str]]

A mapping where each key is a type of relationship and the value is a list of related concept names.

importance_score float

A numerical score representing the importance or relevance of the concept.

context_snippets List[str]

A list of text snippets providing context where the concept appears.

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
class rConcept(BaseModel):
    """
    Represents a key concept with its relationships and associated metadata.

    Attributes:
        name (str): The name of the concept.
        category (str): The category of the concept (e.g., 'technical', 'domain', 'method', etc.).
        relationships (Dict[str, List[str]]): A mapping where each key is a type of relationship and the
            value is a list of related concept names.
        importance_score (float): A numerical score representing the importance or relevance of the concept.
        context_snippets (List[str]): A list of text snippets providing context where the concept appears.
    """
    name: str  # concept name
    category: str  # e.g. 'technical', 'domain', 'method'
    relationships: dict[str, list[str]]  # relationship type -> related concept names
    importance_score: float  # relative importance/relevance score
    context_snippets: list[str]  # text snippets where the concept appears
normalize_vectors(vectors)

Normalize vectors to unit length

Source code in toolboxv2/mods/isaa/base/KnowledgeBase.py
52
53
54
55
def normalize_vectors(vectors: np.ndarray) -> np.ndarray:
    """Normalize vectors to unit length.

    BUGFIX: the previous implementation used ``np.divide(..., where=norms != 0)``
    without an ``out=`` array; entries where the mask is False are then left
    *uninitialized* (arbitrary memory), so zero-norm rows contained garbage.
    Zero-norm rows now come out as exact zero rows.

    Args:
        vectors: 2D array of shape (n, dim), one vector per row.

    Returns:
        Array of the same shape where each non-zero row is scaled to unit length
        and each all-zero row stays all-zero.
    """
    norms = np.linalg.norm(vectors, axis=1, keepdims=True)
    # Substitute 1.0 for zero norms so the division is defined everywhere;
    # 0 / 1 keeps those rows at exact zero.
    safe_norms = np.where(norms == 0, 1.0, norms)
    return vectors / safe_norms
VectorStores

Vector store implementations for the toolboxv2 system.

taichiNumpyNumbaVectorStores
NumpyVectorStore

Bases: AbstractVectorStore

Source code in toolboxv2/mods/isaa/base/VectorStores/taichiNumpyNumbaVectorStores.py
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
class NumpyVectorStore(AbstractVectorStore):
    """In-memory vector store backed by a single NumPy matrix.

    Search computes cosine similarity between the query and L2-normalized
    copies of the stored embeddings, using Taichi kernels for the dot products.
    """

    def __init__(self, use_gpu=False):
        # NOTE(review): use_gpu is currently unused; kept for API compatibility.
        self.embeddings = np.empty((0, 0))
        self.chunks = []
        # Lazy cache of unit-length embeddings; must be invalidated whenever
        # self.embeddings changes (add/load/clear).
        self.normalized_embeddings = None

    def add_embeddings(self, embeddings: np.ndarray, chunks: list[Chunk]) -> None:
        """Append embeddings and their chunks; dimensions must match existing data.

        Raises:
            ValueError: if embeddings are not 2D, the chunk count mismatches,
                or the embedding dimension differs from stored data.
        """
        if len(embeddings.shape) != 2:
            raise ValueError("Embeddings must be 2D array")
        if len(chunks) != embeddings.shape[0]:
            raise ValueError("Mismatch between embeddings and chunks count")

        if self.embeddings.size == 0:
            self.embeddings = embeddings
        else:
            if embeddings.shape[1] != self.embeddings.shape[1]:
                raise ValueError("Embedding dimensions must match")
            self.embeddings = np.vstack([self.embeddings, embeddings])
        self.chunks.extend(chunks)
        # Reset normalized embeddings cache
        self.normalized_embeddings = None

    def search(self, query_embedding: np.ndarray, k: int = 5, min_similarity: float = 0.7) -> list[Chunk]:
        """Return up to k chunks with cosine similarity >= min_similarity."""
        if self.embeddings.size == 0:
            return []

        # Pre-compute normalized embeddings if not cached
        if self.normalized_embeddings is None:
            self._precompute_normalized_embeddings()

        # Normalize query
        query_norm = self._normalize_vector(query_embedding)

        # Enhanced Taichi kernel for similarity computation
        n = len(self.chunks)
        similarities = np.zeros(n, dtype=np.float32)

        @ti.kernel
        def compute_similarities_optimized(
            query: ti.types.ndarray(dtype=ti.f32),
            embeddings: ti.types.ndarray(dtype=ti.f32),
            similarities: ti.types.ndarray(dtype=ti.f32),
            n: ti.i32,
            dim: ti.i32
        ):
            ti.loop_config(block_dim=256)
            for i in range(n):
                dot_product = 0.0
                # Vectorized dot product computation
                for j in range(dim):
                    dot_product += embeddings[i, j] * query[j]
                similarities[i] = dot_product

        # Alternative optimized kernel using tile-based computation
        @ti.kernel
        def compute_similarities_tiled(
            query: ti.types.ndarray(dtype=ti.f32),
            embeddings: ti.types.ndarray(dtype=ti.f32),
            similarities: ti.types.ndarray(dtype=ti.f32),
            n: ti.i32,
            dim: ti.i32
        ):
            tile_size = 16  # Adjust based on hardware
            for i in range(n):
                dot_product = 0.0
                # Process in tiles for better cache utilization
                for jt in range(0, dim):
                    if jt % tile_size != 0:
                        continue
                    tile_sum = 0.0
                    for j in range(jt, ti.min(jt + tile_size, dim)):
                        tile_sum += embeddings[i, j] * query[j]
                    dot_product += tile_sum
                similarities[i] = dot_product

        # Choose the appropriate kernel based on dimension size
        if query_embedding.shape[0] >= 256:
            compute_similarities_tiled(
                query_norm.astype(np.float32),
                self.normalized_embeddings,
                similarities,
                n,
                query_embedding.shape[0]
            )
        else:
            compute_similarities_optimized(
                query_norm.astype(np.float32),
                self.normalized_embeddings,
                similarities,
                n,
                query_embedding.shape[0]
            )

        # Optimize top-k selection
        if k >= n:
            indices = np.argsort(-similarities)
        else:
            # Use partial sort for better performance when k < n
            indices = np.argpartition(-similarities, k)[:k]
            indices = indices[np.argsort(-similarities[indices])]

        # Filter results efficiently using vectorized operations
        mask = similarities[indices] >= min_similarity
        filtered_indices = indices[mask]
        return [self.chunks[idx] for idx in filtered_indices[:k]]

    def save(self) -> bytes:
        """Serialize embeddings and chunks to bytes via pickle."""
        return pickle.dumps({
            'embeddings': self.embeddings,
            'chunks': self.chunks
        })

    def load(self, data: bytes) -> 'NumpyVectorStore':
        """Restore a store previously produced by save().

        BUGFIX: also invalidates the normalized-embedding cache. Previously,
        calling load() after a search() kept normalized vectors from the OLD
        embeddings, so subsequent searches returned wrong results.
        """
        loaded = pickle.loads(data)
        self.embeddings = loaded['embeddings']
        self.chunks = loaded['chunks']
        self.normalized_embeddings = None
        return self

    def clear(self) -> None:
        """Drop all embeddings, chunks, and cached normalized vectors."""
        self.embeddings = np.empty((0, 0))
        self.chunks = []
        self.normalized_embeddings = None

    def rebuild_index(self) -> None:
        pass  # No index to rebuild for numpy implementation

    def _normalize_vector(self, vector: np.ndarray) -> np.ndarray:
        """Normalize a single vector efficiently (epsilon guards zero norm)."""
        return vector / (np.linalg.norm(vector) + 1e-8)

    def _precompute_normalized_embeddings(self) -> None:
        """Pre-compute and cache normalized embeddings."""
        # Allocate output array
        self.normalized_embeddings = np.empty_like(self.embeddings, dtype=np.float32)

        # Normalize embeddings using Taichi
        batch_normalize(
            self.embeddings.astype(np.float32),
            self.normalized_embeddings,
            self.embeddings.shape[0],
            self.embeddings.shape[1]
        )
types
AbstractVectorStore

Bases: ABC

Abstract base class for vector stores

Source code in toolboxv2/mods/isaa/base/VectorStores/types.py
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
class AbstractVectorStore(ABC):
    """Abstract base class for vector stores"""

    @abstractmethod
    def add_embeddings(self, embeddings: np.ndarray, chunks: list[Chunk]) -> None:
        """Add embeddings and their corresponding chunks to the store"""
        pass

    @abstractmethod
    def search(self, query_embedding: np.ndarray, k: int = 5, min_similarity: float = 0.7) -> list[Chunk]:
        """Search for up to k similar vectors above the similarity threshold"""
        pass

    @abstractmethod
    def save(self) -> bytes:
        """Serialize the store's contents to bytes (callers handle any disk I/O)"""
        pass

    @abstractmethod
    def load(self, data: bytes) -> 'AbstractVectorStore':
        """Restore the store from bytes previously produced by save(); returns self"""
        pass

    @abstractmethod
    def clear(self) -> None:
        """Clear all data from the store"""
        pass

    @abstractmethod
    def rebuild_index(self) -> None:
        """Rebuild any internal search index; may be a no-op for stores without one"""
        pass
add_embeddings(embeddings, chunks) abstractmethod

Add embeddings and their corresponding chunks to the store

Source code in toolboxv2/mods/isaa/base/VectorStores/types.py
21
22
23
24
@abstractmethod
def add_embeddings(self, embeddings: np.ndarray, chunks: list[Chunk]) -> None:
    """Add embeddings and their corresponding chunks to the store"""
    pass
clear() abstractmethod

Clear all data from the store

Source code in toolboxv2/mods/isaa/base/VectorStores/types.py
41
42
43
44
@abstractmethod
def clear(self) -> None:
    """Clear all data from the store"""
    pass
load(data) abstractmethod

Load the vector store from disk

Source code in toolboxv2/mods/isaa/base/VectorStores/types.py
36
37
38
39
@abstractmethod
def load(self, data: bytes) -> 'AbstractVectorStore':
    """Load the vector store from disk"""
    pass
rebuild_index() abstractmethod

Optional for faster searches

Source code in toolboxv2/mods/isaa/base/VectorStores/types.py
46
47
48
49
@abstractmethod
def rebuild_index(self) -> None:
    """Optional for faster searches"""
    pass
save() abstractmethod

Save the vector store to disk

Source code in toolboxv2/mods/isaa/base/VectorStores/types.py
31
32
33
34
@abstractmethod
def save(self) -> bytes:
    """Save the vector store to disk"""
    pass
search(query_embedding, k=5, min_similarity=0.7) abstractmethod

Search for similar vectors

Source code in toolboxv2/mods/isaa/base/VectorStores/types.py
26
27
28
29
@abstractmethod
def search(self, query_embedding: np.ndarray, k: int = 5, min_similarity: float = 0.7) -> list[Chunk]:
    """Search for similar vectors"""
    pass
Chunk dataclass

Represents a chunk of text with its embedding and metadata

Source code in toolboxv2/mods/isaa/base/VectorStores/types.py
 8
 9
10
11
12
13
14
15
@dataclass(slots=True)
class Chunk:
    """Represents a chunk of text with its embedding and metadata"""
    text: str  # raw chunk text
    embedding: np.ndarray  # vector representation of the text
    metadata: dict[str, Any]  # arbitrary provenance/context metadata
    content_hash: str  # hash of the content — presumably for deduplication; TODO confirm
    cluster_id: int | None = None  # optional cluster assignment; None until clustered

extras

adapter
LiteLLM LLM Interface Module

This module provides interfaces for interacting with LiteLLM's language models, including text generation and embedding capabilities.

Author: Lightrag Team Created: 2025-02-04 License: MIT License Version: 1.0.0

Change Log: - 1.0.0 (2025-02-04): Initial LiteLLM release * Ported OpenAI logic to use litellm async client * Updated error types and environment variable names * Preserved streaming and embedding support

Dependencies
  • litellm
  • numpy
  • pipmaster
  • Python >= 3.10
Usage

import logging

if not hasattr(logging, 'NONE'): logging.NONE = 100

from llm_interfaces.litellm import litellm_complete, litellm_embed

litellm_complete(prompt, system_prompt=None, history_messages=None, keyword_extraction=False, model_name='groq/gemma2-9b-it', **kwargs) async

Public completion interface using the model name specified in the global configuration. Optionally extracts keywords if requested.

Source code in toolboxv2/mods/isaa/extras/adapter.py
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
async def litellm_complete(
    prompt, system_prompt=None, history_messages=None, keyword_extraction=False, model_name = "groq/gemma2-9b-it", **kwargs
) -> str | AsyncIterator[str]:
    """
    Public completion interface using the model name specified in the global configuration.
    Optionally extracts keywords if requested.

    Args:
        prompt: The user prompt to complete.
        system_prompt: Optional system message prepended to the conversation.
        history_messages: Optional prior conversation messages.
        keyword_extraction: When true, force a JSON response format.
        model_name: LiteLLM model identifier.
        **kwargs: Extra options forwarded to the completion call.

    Returns:
        The completion string, or an async iterator of chunks when streaming.
    """
    if history_messages is None:
        history_messages = []
    # BUGFIX: the named ``keyword_extraction`` parameter was previously ignored —
    # only ``kwargs.pop("keyword_extraction")`` was consulted, which can never
    # find it because the named parameter already captured it. Honor both.
    keyword_extraction_flag = keyword_extraction or kwargs.pop("keyword_extraction", None)
    if keyword_extraction_flag:
        kwargs["response_format"] = "json"

    if "response_format" in kwargs:
        if isinstance(kwargs["response_format"], dict):
            kwargs["response_format"] = enforce_no_additional_properties(kwargs["response_format"])
        elif isinstance(kwargs["response_format"], str):
            pass
        else:
            # Assume a pydantic model: convert to a JSON schema first.
            kwargs["response_format"] = enforce_no_additional_properties(kwargs["response_format"].model_json_schema())  # oder .schema() in v1

    # Mistral-family models do not accept response_format — strip it.
    if any(x in model_name for x in ["mistral", "mixtral"]):
        kwargs.pop("response_format", None)

    return await litellm_complete_if_cache(
        model_name,
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        **kwargs,
    )
litellm_complete_if_cache(model, prompt, system_prompt=None, history_messages=None, base_url=None, api_key=None, **kwargs) async

Core function to query the LiteLLM model. It builds the message context, invokes the completion API, and returns either a complete result string or an async iterator for streaming responses.

Source code in toolboxv2/mods/isaa/extras/adapter.py
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10),
    retry=retry_if_exception_type((RateLimitError, Timeout, APIConnectionError)),
)
async def litellm_complete_if_cache(
    model,
    prompt,
    system_prompt=None,
    history_messages=None,
    base_url=None,
    api_key=None,
    **kwargs,
) -> str | AsyncIterator[str]:
    """
    Core function to query the LiteLLM model. It builds the message context,
    invokes the completion API, and returns either a complete result string or
    an async iterator for streaming responses.

    Args:
        model: LiteLLM model identifier.
        prompt: The new user prompt.
        system_prompt: Optional system message.
        history_messages: Optional prior conversation messages.
        base_url: Currently unused — TODO confirm whether it should be forwarded.
        api_key: If given, exported as LITELLM_API_KEY for the client.
        **kwargs: Extra options forwarded to ``acompletion``.

    Returns:
        The completion string, an async iterator of chunks when streaming,
        or "" when the completion call fails.
    """
    # Set the API key if provided
    if api_key:
        os.environ["LITELLM_API_KEY"] = api_key

    # Remove internal keys not needed for the client call
    kwargs.pop("hashing_kv", None)
    kwargs.pop("keyword_extraction", None)

    # BUGFIX: an empty FALLBACKS_MODELS env previously produced [''] — a bogus
    # empty-string model name — and explicit ``fallbacks`` kwargs were dropped
    # on the non-response_format branch. Build one clean list and use it always.
    env_fallbacks = [m for m in os.getenv("FALLBACKS_MODELS", "").split(",") if m]
    fallbacks = kwargs.pop("fallbacks", []) + env_fallbacks

    # Build the messages list from system prompt, conversation history, and the new prompt
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    if history_messages is not None:
        messages.extend(history_messages)
    messages.append({"role": "user", "content": prompt})

    try:
        response = await acompletion(
            model=model, messages=messages,
            fallbacks=fallbacks,
            **kwargs
        )
    except Exception as e:
        print(f"\n{model=}\n{prompt=}\n{system_prompt=}\n{history_messages=}\n{base_url=}\n{api_key=}\n{kwargs=}")
        get_logger().error(f"Failed to litellm memory work {e}")
        return ""

    # Check if the response is a streaming response (i.e. an async iterator)
    if hasattr(response, "__aiter__"):

        async def inner():
            async for chunk in response:
                # Assume LiteLLM response structure is similar to OpenAI's
                content = chunk.choices[0].delta.content
                if content is None:
                    continue
                yield content

        return inner()
    else:
        # Non-streaming: extract and return the full content string
        content = response.choices[0].message.content
        if content is None:
            # Fall back to tool-call arguments when no plain content is present
            content = response.choices[0].message.tool_calls[0].function.arguments
        return content
litellm_embed(texts, model='gemini/text-embedding-004', base_url=None, api_key=None) async

Generates embeddings for the given list of texts using LiteLLM.

Source code in toolboxv2/mods/isaa/extras/adapter.py
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=60),
    retry=retry_if_exception_type((RateLimitError, Timeout, APIConnectionError)),
)
async def litellm_embed(
    texts: list[str],
    model: str = "gemini/text-embedding-004",
    base_url: str = None,
    api_key: str = None,
) -> np.ndarray:
    """
    Generates embeddings for the given list of texts using LiteLLM.

    Args:
        texts: Texts to embed.
        model: LiteLLM embedding model identifier.
        base_url: Currently unused — TODO confirm whether it should be forwarded.
        api_key: If given, exported as LITELLM_API_KEY for the client.

    Returns:
        A 2D array with one embedding row per input text.
    """
    # BUGFIX: ``api_key`` was previously accepted but silently ignored; export
    # it the same way the sibling completion function does.
    if api_key:
        os.environ["LITELLM_API_KEY"] = api_key
    response = await litellm.aembedding(
        model=model, input=texts,
        # encoding_format="float"
    )
    return np.array([dp.embedding for dp in response.data])
mcp_session_manager
MCPSessionManager

Manages persistent MCP sessions with automatic reconnection and parallel processing

Source code in toolboxv2/mods/isaa/extras/mcp_session_manager.py
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
class MCPSessionManager:
    """Manages persistent MCP sessions with automatic reconnection and parallel processing"""

    def __init__(self):
        self.sessions: dict[str, ClientSession] = {}  # server name -> live session
        self.connections: dict[str, Any] = {}  # server name -> transport context, kept for cleanup
        self.capabilities_cache: dict[str, dict] = {}  # server name -> extracted capabilities
        self.retry_count: dict[str, int] = {}  # server name -> consecutive failed creation attempts
        self.max_retries = 3
        self.connection_timeout = 15.0  # seconds to establish/initialize a session
        self.operation_timeout = 10.0  # seconds for capability-extraction operations

    async def get_session_with_timeout(self, server_name: str, server_config: dict[str, Any]) -> ClientSession | None:
        """Get session with timeout protection; returns None on timeout instead of raising."""
        try:
            return await asyncio.wait_for(
                self.get_session(server_name, server_config),
                timeout=self.connection_timeout
            )
        except TimeoutError:
            eprint(f"MCP session creation timeout for {server_name}")
            return None

    async def get_session(self, server_name: str, server_config: dict[str, Any]) -> ClientSession | None:
        """Get or create persistent MCP session with proper context management.

        Reuses a cached session when it still responds to list_tools();
        otherwise drops the stale entries and creates a fresh session.
        """
        if server_name in self.sessions:
            try:
                # Test if session is still alive with timeout
                session = self.sessions[server_name]
                # Quick connectivity test (list_tools doubles as a ping)
                await asyncio.wait_for(session.list_tools(), timeout=2.0)
                return session
            except Exception as e:
                wprint(f"MCP session {server_name} failed, recreating: {e}")
                # Clean up the old session
                if server_name in self.sessions:
                    del self.sessions[server_name]
                if server_name in self.connections:
                    del self.connections[server_name]

        return await self._create_session(server_name, server_config)

    async def _create_session(self, server_name: str, server_config: dict[str, Any]) -> ClientSession | None:
        """Create new MCP session with improved error handling.

        Dispatches on the configured transport ('stdio' or HTTP variants) and
        retries on failure up to self.max_retries via bounded recursion.
        """
        try:
            command = server_config.get('command')
            args = server_config.get('args', [])
            env = server_config.get('env', {})
            transport_type = server_config.get('transport', 'stdio')

            if not command:
                eprint(f"No command specified for MCP server {server_name}")
                return None

            iprint(f"Creating MCP session for {server_name} (transport: {transport_type})")

            session = None

            # Create connection based on transport type
            if transport_type == 'stdio':
                session = await self._create_stdio_session(server_name, command, args, env)
            elif transport_type in ['http', 'streamable-http']:
                session = await self._create_http_session(server_name, server_config)
            else:
                eprint(f"Unsupported transport type: {transport_type}")
                return None

            if session:
                self.sessions[server_name] = session
                self.retry_count[server_name] = 0
                iprint(f"✓ MCP session created successfully: {server_name}")
                return session

            return None

        except Exception as e:
            self.retry_count[server_name] = self.retry_count.get(server_name, 0) + 1
            if self.retry_count[server_name] <= self.max_retries:
                wprint(f"MCP session creation failed (attempt {self.retry_count[server_name]}/{self.max_retries}): {e}")
                await asyncio.sleep(1.0)  # Longer delay before retry
                # Recursion is bounded: the counter above caps it at max_retries.
                return await self._create_session(server_name, server_config)
            else:
                eprint(f"✗ MCP session creation failed after {self.max_retries} attempts: {e}")
                return None

    async def _create_stdio_session(self, server_name: str, command: str, args: list[str], env: dict[str, str]) -> \
    ClientSession | None:
        """Create stdio MCP session with fixed async context handling.

        NOTE(review): the transport context manager is entered manually here;
        its __aexit__ runs later in _cleanup_session (or in the failure path
        below), so both must stay in sync.
        """
        try:
            from mcp import StdioServerParameters
            from mcp.client.stdio import stdio_client

            # Prepare environment (inherit the process env, overlay server-specific vars)
            process_env = os.environ.copy()
            process_env.update(env)

            server_params = StdioServerParameters(
                command=command,
                args=args,
                env=process_env
            )

            # Create the stdio client and session in a single task context
            stdio_connection = stdio_client(server_params)

            # Enter the context manager
            read_stream, write_stream = await stdio_connection.__aenter__()

            # Store the connection for cleanup later
            self.connections[server_name] = stdio_connection

            # Create session
            session = ClientSession(read_stream, write_stream)

            # Initialize session in the same context
            await session.__aenter__()
            await asyncio.wait_for(session.initialize(), timeout=self.connection_timeout)

            return session

        except Exception as e:
            eprint(f"Failed to create stdio session for {server_name}: {e}")
            # Cleanup on failure: close the half-opened transport, ignore errors
            if server_name in self.connections:
                with contextlib.suppress(Exception):
                    await self.connections[server_name].__aexit__(None, None, None)
                del self.connections[server_name]
            return None

    async def _create_http_session(self, server_name: str, server_config: dict[str, Any]) -> ClientSession | None:
        """Create HTTP MCP session with timeout"""
        try:
            from mcp.client.streamable_http import streamablehttp_client

            url = server_config.get('url', f"http://localhost:{server_config.get('port', 8000)}/mcp")

            connection = streamablehttp_client(url)
            # NOTE(review): the third tuple element is unused here — confirm it
            # requires no explicit invocation on teardown.
            read_stream, write_stream, cleanup = await asyncio.wait_for(
                connection.__aenter__(),
                timeout=self.connection_timeout
            )

            session = ClientSession(read_stream, write_stream)
            await session.__aenter__()
            await asyncio.wait_for(
                session.initialize(),
                timeout=self.connection_timeout
            )

            self.connections[server_name] = connection
            return session

        except Exception as e:
            eprint(f"Failed to create HTTP session for {server_name}: {e}")
            return None

    async def extract_capabilities_with_timeout(self, session: ClientSession, server_name: str) -> dict[str, dict]:
        """Extract capabilities with timeout protection; empty capability dict on timeout."""
        try:
            return await asyncio.wait_for(
                self.extract_capabilities(session, server_name),
                timeout=self.operation_timeout
            )
        except TimeoutError:
            eprint(f"Capability extraction timeout for {server_name}")
            return {'tools': {}, 'resources': {}, 'resource_templates': {}, 'prompts': {}, 'images': {}}

    async def extract_capabilities(self, session: ClientSession, server_name: str) -> dict[str, dict]:
        """Extract all capabilities from MCP session.

        Each category (tools, resources, templates, prompts) is fetched with
        its own 3s timeout so one slow category does not block the others.
        Results are cached per server name.
        """
        if server_name in self.capabilities_cache:
            return self.capabilities_cache[server_name]

        capabilities = {
            'tools': {},
            'resources': {},
            'resource_templates': {},
            'prompts': {},
            'images': {}
        }

        try:
            # Extract tools with individual timeouts
            try:
                tools_response = await asyncio.wait_for(session.list_tools(), timeout=3.0)
                for tool in tools_response.tools:
                    capabilities['tools'][tool.name] = {
                        'name': tool.name,
                        'description': tool.description or '',
                        'input_schema': tool.inputSchema,
                        'output_schema': getattr(tool, 'outputSchema', None),
                        'display_name': getattr(tool, 'title', tool.name)
                    }
            except TimeoutError:
                wprint(f"Tools extraction timeout for {server_name}")
            except Exception as e:
                wprint(f"Failed to extract tools from {server_name}: {e}")

            # Extract resources with timeout
            try:
                resources_response = await asyncio.wait_for(session.list_resources(), timeout=3.0)
                for resource in resources_response.resources:
                    capabilities['resources'][str(resource.uri)] = {
                        'uri': str(resource.uri),
                        'name': resource.name or str(resource.uri),
                        'description': resource.description or '',
                        'mime_type': getattr(resource, 'mimeType', None)
                    }
            except TimeoutError:
                wprint(f"Resources extraction timeout for {server_name}")
            except Exception as e:
                wprint(f"Failed to extract resources from {server_name}: {e}")

            # Extract resource templates with timeout
            try:
                templates_response = await asyncio.wait_for(session.list_resource_templates(), timeout=3.0)
                for template in templates_response.resourceTemplates:
                    capabilities['resource_templates'][template.uriTemplate] = {
                        'uri_template': template.uriTemplate,
                        'name': template.name or template.uriTemplate,
                        'description': template.description or ''
                    }
            except TimeoutError:
                wprint(f"Resource templates extraction timeout for {server_name}")
            except Exception as e:
                wprint(f"Failed to extract resource templates from {server_name}: {e}")

            # Extract prompts with timeout
            try:
                prompts_response = await asyncio.wait_for(session.list_prompts(), timeout=3.0)
                for prompt in prompts_response.prompts:
                    capabilities['prompts'][prompt.name] = {
                        'name': prompt.name,
                        'description': prompt.description or '',
                        'arguments': [
                            {
                                'name': arg.name,
                                'description': arg.description or '',
                                'required': arg.required
                            } for arg in (prompt.arguments or [])
                        ]
                    }
            except TimeoutError:
                wprint(f"Prompts extraction timeout for {server_name}")
            except Exception as e:
                wprint(f"Failed to extract prompts from {server_name}: {e}")

            self.capabilities_cache[server_name] = capabilities

            total_caps = (len(capabilities['tools']) + len(capabilities['resources']) +
                          len(capabilities['resource_templates']) + len(capabilities['prompts']))
            iprint(f"✓ Extracted {total_caps} capabilities from {server_name}")

        except Exception as e:
            eprint(f"Failed to extract capabilities from {server_name}: {e}")

        return capabilities

    async def _cleanup_session(self, server_name: str):
        """Clean up a specific session with proper context management.

        Closes the session first, then its transport connection, each with a
        2s timeout, and finally clears the capability cache and retry counter.
        """
        try:
            # Clean up session first
            if server_name in self.sessions:
                try:
                    session = self.sessions[server_name]
                    # NOTE(review): Exception already covers TimeoutError here.
                    await asyncio.wait_for(session.__aexit__(None, None, None), timeout=2.0)
                except (TimeoutError, Exception) as e:
                    wprint(f"Session cleanup warning for {server_name}: {e}")
                finally:
                    del self.sessions[server_name]

            # Clean up connection
            if server_name in self.connections:
                try:
                    connection = self.connections[server_name]
                    await asyncio.wait_for(connection.__aexit__(None, None, None), timeout=2.0)
                except (TimeoutError, Exception) as e:
                    wprint(f"Connection cleanup warning for {server_name}: {e}")
                finally:
                    del self.connections[server_name]

            # Clear cache
            if server_name in self.capabilities_cache:
                del self.capabilities_cache[server_name]

            # Reset retry count
            if server_name in self.retry_count:
                del self.retry_count[server_name]

        except Exception as e:
            wprint(f"Cleanup error for {server_name}: {e}")

    async def cleanup_all(self):
        """Clean up all sessions with timeout (sessions are cleaned concurrently)."""
        cleanup_tasks = []
        for server_name in list(self.sessions.keys()):
            task = asyncio.create_task(self._cleanup_session(server_name))
            cleanup_tasks.append(task)

        if cleanup_tasks:
            try:
                await asyncio.wait_for(
                    asyncio.gather(*cleanup_tasks, return_exceptions=True),
                    timeout=5.0
                )
            except TimeoutError:
                wprint("MCP session cleanup timeout")
cleanup_all() async

Clean up all sessions with timeout

Source code in toolboxv2/mods/isaa/extras/mcp_session_manager.py
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
async def cleanup_all(self):
    """Clean up all sessions with timeout"""
    # Schedule one cleanup task per known session. list() snapshots the keys
    # because _cleanup_session deletes entries from self.sessions as it runs.
    cleanup_tasks = []
    for server_name in list(self.sessions.keys()):
        task = asyncio.create_task(self._cleanup_session(server_name))
        cleanup_tasks.append(task)

    if cleanup_tasks:
        try:
            # return_exceptions=True keeps one failing cleanup from cancelling
            # the others; the whole batch is capped at 5 seconds.
            await asyncio.wait_for(
                asyncio.gather(*cleanup_tasks, return_exceptions=True),
                timeout=5.0
            )
        except TimeoutError:
            wprint("MCP session cleanup timeout")
extract_capabilities(session, server_name) async

Extract all capabilities from MCP session

Source code in toolboxv2/mods/isaa/extras/mcp_session_manager.py
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
async def extract_capabilities(self, session: ClientSession, server_name: str) -> dict[str, dict]:
    """Extract all capabilities from MCP session.

    Queries the session for tools, resources, resource templates and prompts
    (each under its own 3s timeout) and returns them as nested dicts keyed by
    name/URI. Results are memoized in self.capabilities_cache per server.
    A failure in one section logs a warning and leaves that section empty.
    """
    # Cache hit: never re-query a server we already enumerated.
    if server_name in self.capabilities_cache:
        return self.capabilities_cache[server_name]

    # NOTE(review): 'images' is initialized but never populated in this method —
    # confirm whether callers rely on the key merely existing.
    capabilities = {
        'tools': {},
        'resources': {},
        'resource_templates': {},
        'prompts': {},
        'images': {}
    }

    try:
        # Extract tools with individual timeouts
        try:
            tools_response = await asyncio.wait_for(session.list_tools(), timeout=3.0)
            for tool in tools_response.tools:
                capabilities['tools'][tool.name] = {
                    'name': tool.name,
                    'description': tool.description or '',
                    'input_schema': tool.inputSchema,
                    # Optional MCP fields; getattr guards older SDK versions.
                    'output_schema': getattr(tool, 'outputSchema', None),
                    'display_name': getattr(tool, 'title', tool.name)
                }
        except TimeoutError:
            wprint(f"Tools extraction timeout for {server_name}")
        except Exception as e:
            wprint(f"Failed to extract tools from {server_name}: {e}")

        # Extract resources with timeout
        try:
            resources_response = await asyncio.wait_for(session.list_resources(), timeout=3.0)
            for resource in resources_response.resources:
                # URI objects are stringified so they can serve as dict keys.
                capabilities['resources'][str(resource.uri)] = {
                    'uri': str(resource.uri),
                    'name': resource.name or str(resource.uri),
                    'description': resource.description or '',
                    'mime_type': getattr(resource, 'mimeType', None)
                }
        except TimeoutError:
            wprint(f"Resources extraction timeout for {server_name}")
        except Exception as e:
            wprint(f"Failed to extract resources from {server_name}: {e}")

        # Extract resource templates with timeout
        try:
            templates_response = await asyncio.wait_for(session.list_resource_templates(), timeout=3.0)
            for template in templates_response.resourceTemplates:
                capabilities['resource_templates'][template.uriTemplate] = {
                    'uri_template': template.uriTemplate,
                    'name': template.name or template.uriTemplate,
                    'description': template.description or ''
                }
        except TimeoutError:
            wprint(f"Resource templates extraction timeout for {server_name}")
        except Exception as e:
            wprint(f"Failed to extract resource templates from {server_name}: {e}")

        # Extract prompts with timeout
        try:
            prompts_response = await asyncio.wait_for(session.list_prompts(), timeout=3.0)
            for prompt in prompts_response.prompts:
                capabilities['prompts'][prompt.name] = {
                    'name': prompt.name,
                    'description': prompt.description or '',
                    'arguments': [
                        {
                            'name': arg.name,
                            'description': arg.description or '',
                            'required': arg.required
                        } for arg in (prompt.arguments or [])
                    ]
                }
        except TimeoutError:
            wprint(f"Prompts extraction timeout for {server_name}")
        except Exception as e:
            wprint(f"Failed to extract prompts from {server_name}: {e}")

        # Cache even partially-filled results; per-section failures above are
        # swallowed, so the cache may hold an incomplete snapshot by design.
        self.capabilities_cache[server_name] = capabilities

        total_caps = (len(capabilities['tools']) + len(capabilities['resources']) +
                      len(capabilities['resource_templates']) + len(capabilities['prompts']))
        iprint(f"✓ Extracted {total_caps} capabilities from {server_name}")

    except Exception as e:
        eprint(f"Failed to extract capabilities from {server_name}: {e}")

    return capabilities
extract_capabilities_with_timeout(session, server_name) async

Extract capabilities with timeout protection

Source code in toolboxv2/mods/isaa/extras/mcp_session_manager.py
169
170
171
172
173
174
175
176
177
178
async def extract_capabilities_with_timeout(self, session: ClientSession, server_name: str) -> dict[str, dict]:
    """Run extract_capabilities under self.operation_timeout.

    Returns an all-empty capability mapping when the overall extraction
    exceeds the configured timeout.
    """
    try:
        result = await asyncio.wait_for(
            self.extract_capabilities(session, server_name),
            timeout=self.operation_timeout,
        )
    except TimeoutError:
        eprint(f"Capability extraction timeout for {server_name}")
        return {'tools': {}, 'resources': {}, 'resource_templates': {}, 'prompts': {}, 'images': {}}
    return result
get_session(server_name, server_config) async

Get or create persistent MCP session with proper context management

Source code in toolboxv2/mods/isaa/extras/mcp_session_manager.py
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
async def get_session(self, server_name: str, server_config: dict[str, Any]) -> ClientSession | None:
    """Get or create persistent MCP session with proper context management.

    Reuses a cached session if a quick list_tools() probe (2s) succeeds;
    otherwise drops the stale entries and creates a fresh session.
    """
    if server_name in self.sessions:
        try:
            # Test if session is still alive with timeout
            session = self.sessions[server_name]
            # Quick connectivity test
            await asyncio.wait_for(session.list_tools(), timeout=2.0)
            return session
        except Exception as e:
            wprint(f"MCP session {server_name} failed, recreating: {e}")
            # Clean up the old session
            # NOTE(review): stale entries are only removed from the dicts, not
            # __aexit__-ed the way _cleanup_session does — confirm this does
            # not leak the underlying connection's context.
            if server_name in self.sessions:
                del self.sessions[server_name]
            if server_name in self.connections:
                del self.connections[server_name]

    return await self._create_session(server_name, server_config)
get_session_with_timeout(server_name, server_config) async

Get session with timeout protection

Source code in toolboxv2/mods/isaa/extras/mcp_session_manager.py
25
26
27
28
29
30
31
32
33
34
async def get_session_with_timeout(self, server_name: str, server_config: dict[str, Any]) -> ClientSession | None:
    """Wrap get_session in self.connection_timeout; None when it times out."""
    try:
        session = await asyncio.wait_for(
            self.get_session(server_name, server_config),
            timeout=self.connection_timeout,
        )
    except TimeoutError:
        eprint(f"MCP session creation timeout for {server_name}")
        return None
    return session
modes
generate_prompt(subject, context='', additional_requirements=None)

Generates a prompt based on the given subject, with optional context and additional requirements.

Parameters: - subject (str): The main subject for the prompt. - context (str): Optional additional context to tailor the prompt. - additional_requirements (Dict[str, Any]): Optional additional parameters or requirements for the prompt.

Returns: - str: A crafted prompt.

Source code in toolboxv2/mods/isaa/extras/modes.py
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
def generate_prompt(subject: str, context: str = "", additional_requirements: dict[str, Any] = None) -> str:
    """
    Generates a prompt based on the given subject, with optional context and additional requirements.

    Parameters:
    - subject (str): The main subject for the prompt.
    - context (str): Optional additional context to tailor the prompt.
    - additional_requirements (Dict[str, Any]): Optional additional parameters or requirements for the prompt.

    Returns:
    - str: A crafted prompt.
    """
    pieces = [f"Based on the subject '{subject}', with the context '{context}', generate a clear and precise instruction."]
    # Falsy requirements (None or empty dict) add nothing, matching truthiness check.
    if additional_requirements:
        pieces.append(f" Consider the following requirements: {additional_requirements}.")
    return "".join(pieces)
terminal_progress
ChainPrinter

Custom printer for enhanced chain visualization and progress display

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
class ChainPrinter:
    """Custom printer for enhanced chain visualization and progress display.

    Wraps print() with ANSI color codes and emoji prefixes; every method
    writes directly to stdout.
    """

    def __init__(self, verbose: bool = True):
        # NOTE(review): verbose is stored but not consulted by any method in
        # this class — confirm whether callers expect it to gate output.
        self.verbose = verbose
        # ANSI escape sequences keyed by semantic color name.
        self.colors = {
            'success': '\033[92m',
            'error': '\033[91m',
            'warning': '\033[93m',
            'info': '\033[94m',
            'highlight': '\033[95m',
            'dim': '\033[2m',
            'bold': '\033[1m',
            'reset': '\033[0m'
        }

    def _colorize(self, text: str, color: str) -> str:
        # Unknown color names degrade to plain text (empty prefix) but still reset.
        return f"{self.colors.get(color, '')}{text}{self.colors['reset']}"

    def print_header(self, title: str, subtitle: str = None):
        """Print formatted header"""
        print(f"\n{self._colorize('═' * 60, 'highlight')}")
        print(f"{self._colorize(f'🔗 {title}', 'bold')}")
        if subtitle:
            print(f"{self._colorize(subtitle, 'dim')}")
        print(f"{self._colorize('═' * 60, 'highlight')}\n")

    def print_success(self, message: str):
        print(f"{self._colorize('✅ ', 'success')}{message}")

    def print_error(self, message: str):
        print(f"{self._colorize('❌ ', 'error')}{message}")

    def print_warning(self, message: str):
        print(f"{self._colorize('⚠️ ', 'warning')}{message}")

    def print_info(self, message: str):
        print(f"{self._colorize('ℹ️ ', 'info')}{message}")

    def print_progress_start(self, chain_name: str):
        print(f"\n{self._colorize('🚀 Starting chain execution:', 'info')} {self._colorize(chain_name, 'bold')}")

    def print_task_start(self, task_name: str, current: int, total: int):
        # current is 0-based; display as 1-based position.
        progress = f"[{current + 1}/{total}]" if total > 0 else ""
        print(f"  {self._colorize('▶️ ', 'info')}{progress} {task_name}")

    def print_task_complete(self, task_name: str, completed: int, total: int):
        progress = f"[{completed}/{total}]" if total > 0 else ""
        print(f"  {self._colorize('✅', 'success')} {progress} {task_name} completed")

    def print_task_error(self, task_name: str, error: str):
        print(f"  {self._colorize('❌', 'error')} {task_name} failed: {error}")

    def print_progress_end(self, chain_name: str, duration: float, success: bool):
        status = self._colorize('✅ COMPLETED', 'success') if success else self._colorize('❌ FAILED', 'error')
        print(f"\n{status} {chain_name} ({duration:.2f}s)\n")

    def print_tool_usage_success(self, tool_name: str, duration: float, is_meta_tool: bool = False):
        # Meta-tools get the wrench glyph, plain tools the bolt glyph.
        if is_meta_tool:
            print(f"  {self._colorize('🔧 ', 'info')}{tool_name} completed ({duration:.2f}s)")
        else:
            print(f"  {self._colorize('🔩 ', 'info')}{tool_name} completed ({duration:.2f}s)")

    def print_tool_usage_error(self, tool_name: str, error: str, is_meta_tool: bool = False):
        if is_meta_tool:
            print(f"  {self._colorize('🔧 ', 'error')}{tool_name} failed: {error}")
        else:
            print(f"  {self._colorize('🔩 ', 'error')}{tool_name} failed: {error}")

    def print_outline_created(self, outline: dict):
        for step in outline.get("steps", []):
            print(f"  {self._colorize('📖 ', 'info')}Step: {self._colorize(step.get('description', 'Unknown'), 'dim')}")

    def print_reasoning_loop(self, loop_data: dict):
        print(f"  {self._colorize('🧠 ', 'info')}Reasoning Loop #{loop_data.get('loop_number', '?')}")
        print(
            f"    {self._colorize('📖 ', 'info')}Outline Step: {loop_data.get('outline_step', 0)} of {loop_data.get('outline_total', 0)}")
        print(f"    {self._colorize('📚 ', 'info')}Context Size: {loop_data.get('context_size', 0)} entries")
        print(f"    {self._colorize('📋 ', 'info')}Task Stack: {loop_data.get('task_stack_size', 0)} items")
        print(f"    {self._colorize('🔄 ', 'info')}Recovery Attempts: {loop_data.get('auto_recovery_attempts', 0)}")
        print(f"    {self._colorize('📊 ', 'info')}Performance Metrics: {loop_data.get('performance_metrics', {})}")

    def print_chain_list(self, chains: list[tuple[str, ChainMetadata]]):
        """Print formatted list of available chains"""
        if not chains:
            self.print_info("No chains found. Use 'create' to build your first chain.")
            return

        self.print_header("Available Chains", f"Total: {len(chains)}")

        for name, meta in chains:
            # Status indicators
            indicators = []
            if meta.has_parallels:
                indicators.append(self._colorize("⚡", "highlight"))
            if meta.has_conditionals:
                indicators.append(self._colorize("🔀", "warning"))
            if meta.has_error_handling:
                indicators.append(self._colorize("🛡️", "info"))

            status_str = " ".join(indicators) if indicators else ""

            # Complexity color
            complexity_colors = {"simple": "success", "medium": "warning", "complex": "error"}
            complexity = self._colorize(meta.complexity, complexity_colors.get(meta.complexity, "info"))

            print(f"  {self._colorize(name, 'bold')} {status_str}")
            print(f"    {meta.description or 'No description'}")
            # Fix: original f-string ran complexity and agent count together
            # ("simple3 agents"); insert the same bullet separator used between
            # the other fields on this line.
            print(f"    {complexity} • {meta.agent_count} agents • {meta.version}")
            if meta.tags:
                tags_str = " ".join([f"#{tag}" for tag in meta.tags])
                print(f"    {self._colorize(tags_str, 'dim')}")
            print()
print_chain_list(chains)

Print formatted list of available chains

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
def print_chain_list(self, chains: list[tuple[str, ChainMetadata]]):
    """Print a formatted list of available chains.

    Each entry shows the name plus feature indicators, the description,
    a color-coded complexity / agent-count / version line, and any tags.
    """
    if not chains:
        self.print_info("No chains found. Use 'create' to build your first chain.")
        return

    self.print_header("Available Chains", f"Total: {len(chains)}")

    for name, meta in chains:
        # Feature indicators: parallel ⚡ / conditional 🔀 / error handling 🛡️
        indicators = []
        if meta.has_parallels:
            indicators.append(self._colorize("⚡", "highlight"))
        if meta.has_conditionals:
            indicators.append(self._colorize("🔀", "warning"))
        if meta.has_error_handling:
            indicators.append(self._colorize("🛡️", "info"))

        status_str = " ".join(indicators) if indicators else ""

        # Complexity color: unknown complexity values fall back to 'info'.
        complexity_colors = {"simple": "success", "medium": "warning", "complex": "error"}
        complexity = self._colorize(meta.complexity, complexity_colors.get(meta.complexity, "info"))

        print(f"  {self._colorize(name, 'bold')} {status_str}")
        print(f"    {meta.description or 'No description'}")
        # Fix: original f-string ran complexity and agent count together
        # ("simple3 agents"); insert the same bullet separator used between
        # the other fields on this line.
        print(f"    {complexity} • {meta.agent_count} agents • {meta.version}")
        if meta.tags:
            tags_str = " ".join([f"#{tag}" for tag in meta.tags])
            print(f"    {self._colorize(tags_str, 'dim')}")
        print()
print_header(title, subtitle=None)

Print formatted header

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
1490
1491
1492
1493
1494
1495
1496
def print_header(self, title: str, subtitle: str = None):
    """Render a banner: heavy rule, emoji title, optional dimmed subtitle, rule."""
    rule = self._colorize('═' * 60, 'highlight')
    print(f"\n{rule}")
    print(self._colorize(f'🔗 {title}', 'bold'))
    if subtitle:
        print(self._colorize(subtitle, 'dim'))
    print(f"{rule}\n")
ChainProgressTracker

Enhanced progress tracker for chain execution with live display

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
class ChainProgressTracker:
    """Enhanced progress tracker for chain execution with live display.

    Accumulates every ProgressEvent in self.events and mirrors the
    interesting ones to a ChainPrinter for live terminal output.
    """

    def __init__(self, chain_printer: 'ChainPrinter'):
        self.events: list[ProgressEvent] = []   # full event history, in arrival order
        self.start_time = time.time()           # used for chain_end duration
        self.chain_printer = chain_printer
        self.current_task = None                # name of the most recently started task
        self.task_count = 0                     # total tasks announced at chain_start
        self.completed_tasks = 0

    async def emit_event(self, event: ProgressEvent):
        """Emit progress event with live display updates.

        Dispatches on ``event.event_type`` to the matching ChainPrinter call;
        every event is recorded regardless of type.
        """
        self.events.append(event)

        if event.event_type == "chain_start":
            self.task_count = event.metadata.get("task_count", 0)
            self.chain_printer.print_progress_start(event.node_name)

        elif event.event_type == "task_start":
            self.current_task = event.node_name
            self.chain_printer.print_task_start(event.node_name, self.completed_tasks, self.task_count)

        elif event.event_type == "task_complete":
            if event.status == NodeStatus.COMPLETED:
                self.completed_tasks += 1
                self.chain_printer.print_task_complete(event.node_name, self.completed_tasks, self.task_count)
            elif event.status == NodeStatus.FAILED:
                self.chain_printer.print_task_error(event.node_name, event.metadata.get("error", "Unknown error"))

        elif event.event_type == "chain_end":
            duration = time.time() - self.start_time
            self.chain_printer.print_progress_end(event.node_name, duration, event.status == NodeStatus.COMPLETED)

        elif event.event_type == "tool_call" and event.success == False:
            # Fix: the original passed event.error_details.get(...) as a nested
            # default; Python evaluates arguments eagerly, so this raised
            # AttributeError whenever error_details was None (other code in
            # this module guards with `event.error_details or ...`).
            details = event.error_details or {}
            message = event.metadata.get(
                "error",
                event.metadata.get("message", details.get("error", "Unknown error")),
            )
            self.chain_printer.print_tool_usage_error(event.tool_name, message)

        elif event.event_type == "tool_call" and event.success == True:
            self.chain_printer.print_tool_usage_success(event.tool_name, event.duration, event.is_meta_tool)

        elif event.event_type == "outline_created":
            self.chain_printer.print_outline_created(event.metadata.get("outline", {}))

        elif event.event_type == "reasoning_loop":
            self.chain_printer.print_reasoning_loop(event.metadata)

        elif event.event_type == "task_error":
            self.chain_printer.print_task_error(event.node_name, event.metadata.get("error", "Unknown error"))
emit_event(event) async

Emit progress event with live display updates

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
async def emit_event(self, event: ProgressEvent):
    """Emit progress event with live display updates.

    Dispatches on ``event.event_type`` to the matching ChainPrinter call;
    every event is recorded in self.events regardless of type.
    """
    self.events.append(event)

    if event.event_type == "chain_start":
        self.task_count = event.metadata.get("task_count", 0)
        self.chain_printer.print_progress_start(event.node_name)

    elif event.event_type == "task_start":
        self.current_task = event.node_name
        self.chain_printer.print_task_start(event.node_name, self.completed_tasks, self.task_count)

    elif event.event_type == "task_complete":
        if event.status == NodeStatus.COMPLETED:
            self.completed_tasks += 1
            self.chain_printer.print_task_complete(event.node_name, self.completed_tasks, self.task_count)
        elif event.status == NodeStatus.FAILED:
            self.chain_printer.print_task_error(event.node_name, event.metadata.get("error", "Unknown error"))

    elif event.event_type == "chain_end":
        duration = time.time() - self.start_time
        self.chain_printer.print_progress_end(event.node_name, duration, event.status == NodeStatus.COMPLETED)

    elif event.event_type == "tool_call" and event.success == False:
        # Fix: the original passed event.error_details.get(...) as a nested
        # default; Python evaluates arguments eagerly, so this raised
        # AttributeError whenever error_details was None (other code in this
        # module guards with `event.error_details or ...`).
        details = event.error_details or {}
        message = event.metadata.get(
            "error",
            event.metadata.get("message", details.get("error", "Unknown error")),
        )
        self.chain_printer.print_tool_usage_error(event.tool_name, message)

    elif event.event_type == "tool_call" and event.success == True:
        self.chain_printer.print_tool_usage_success(event.tool_name, event.duration, event.is_meta_tool)

    elif event.event_type == "outline_created":
        self.chain_printer.print_outline_created(event.metadata.get("outline", {}))

    elif event.event_type == "reasoning_loop":
        self.chain_printer.print_reasoning_loop(event.metadata)

    elif event.event_type == "task_error":
        self.chain_printer.print_task_error(event.node_name, event.metadata.get("error", "Unknown error"))
DualTrackEventProcessor

Processes events for both tracking perspectives

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
class DualTrackEventProcessor:
    """Processes events for both tracking perspectives.

    Maintains a DualTrackState: a semantic "progress" track (outline, loops,
    tasks, LLM usage) and a low-level "system" track (node lifecycle, health),
    plus cross-track correlations. process_event() is the single entry point.
    """

    def __init__(self):
        self.state = DualTrackState()   # shared mutable state for both tracks
        self.event_history = []         # every event ever processed, in order
        self.start_time = None          # timestamp of the first event seen

    def process_event(self, event: ProgressEvent):
        """Route event to appropriate track processors"""
        if not self.start_time:
            self.start_time = event.timestamp

        self.event_history.append(event)

        # Route to progress track processor
        if self._is_progress_track_event(event):
            self._process_progress_event(event)

        # Route to system track processor
        if self._is_system_track_event(event):
            self._process_system_event(event)

        # Update cross-track correlations
        self._update_correlations(event)

    def _is_progress_track_event(self, event: ProgressEvent) -> bool:
        """Determine if event belongs to progress track"""
        progress_events = {
            'execution_start', 'execution_complete',
            'outline_created', 'plan_created',
            'reasoning_loop', 'meta_tool_analysis',
            'tool_call', 'task_start', 'task_complete', 'task_error',
            'llm_call'
        }
        return event.event_type in progress_events

    def _is_system_track_event(self, event: ProgressEvent) -> bool:
        """Determine if event belongs to system track"""
        system_events = {
            'node_enter', 'node_exit', 'node_phase', 'error'
        }
        return event.event_type in system_events

    def _process_progress_event(self, event: ProgressEvent):
        """Process events in the semantic progress track"""
        if event.event_type == 'execution_start':
            self.state.semantic_progress['execution_phase'] = 'initializing'

        elif event.event_type == 'outline_created':
            # NEW & IMPORTANT - extract outline structure
            outline_data = event.metadata.get('outline') if event.metadata else None
            if outline_data:
                self.state.semantic_progress['current_outline'] = outline_data
                steps = outline_data.get('steps', []) if isinstance(outline_data, dict) else []
                self.state.semantic_progress['outline_progress'] = {
                    'current_step': 1,
                    'total_steps': len(steps),
                    'completed_steps': [],
                    'step_details': steps
                }
            self.state.semantic_progress['execution_phase'] = 'planning'

        elif event.event_type == 'plan_created':
            self.state.semantic_progress['execution_phase'] = 'executing'

        elif event.event_type == 'reasoning_loop':
            loop_num = event.metadata.get('loop_number', 0) if event.metadata else 0
            self.state.semantic_progress['current_reasoning_loop'] = loop_num

        elif event.event_type == 'tool_call':
            # NOTE(review): meta-tool detection reads metadata['is_meta_tool']
            # here, while ChainProgressTracker uses event.is_meta_tool — confirm
            # both fields are populated consistently.
            is_meta = event.metadata.get('is_meta_tool', False) if event.metadata else False
            tool_name = event.tool_name or 'unknown'

            if is_meta:
                # NOTE(review): status is compared against string literals while
                # other code compares against NodeStatus members — confirm the
                # status field's type for tool_call events.
                if event.status == 'RUNNING':
                    self.state.semantic_progress['active_meta_tools'].append(tool_name)
                elif event.status in ['COMPLETED', 'FAILED']:
                    if tool_name in self.state.semantic_progress['active_meta_tools']:
                        self.state.semantic_progress['active_meta_tools'].remove(tool_name)

        elif event.event_type in ['task_start', 'task_complete', 'task_error']:
            task_state = self.state.semantic_progress['task_execution_state']
            if event.event_type == 'task_start':
                task_state['running'].append(event.task_id)
                task_state['total'] += 1
            elif event.event_type == 'task_complete':
                if event.task_id in task_state['running']:
                    task_state['running'].remove(event.task_id)
                task_state['completed'] += 1
            elif event.event_type == 'task_error':
                if event.task_id in task_state['running']:
                    task_state['running'].remove(event.task_id)
                task_state['failed'] += 1

        elif event.event_type == 'llm_call':
            # Aggregate LLM usage; cost/token fields may be falsy and are skipped.
            llm_state = self.state.semantic_progress['llm_interactions']
            llm_state['total_calls'] += 1
            if event.llm_cost:
                llm_state['total_cost'] += event.llm_cost
            if event.llm_total_tokens:
                llm_state['total_tokens'] += event.llm_total_tokens

        elif event.event_type == 'execution_complete':
            self.state.semantic_progress['execution_phase'] = 'completed'

    def _process_system_event(self, event: ProgressEvent):
        """Process events in the system track"""
        node_name = event.node_name or 'unknown'

        if event.event_type == 'node_enter':
            self.state.system_state['active_nodes'][node_name] = {
                'status': 'active',
                'start_time': event.timestamp,
                'current_phase': 'initializing'
            }
            # node_flow records first-visit order only (no duplicates).
            if node_name not in self.state.system_state['node_flow']:
                self.state.system_state['node_flow'].append(node_name)
            self.state.system_state['current_node'] = node_name

        elif event.event_type == 'node_exit':
            if node_name in self.state.system_state['active_nodes']:
                node_info = self.state.system_state['active_nodes'][node_name]
                node_info['status'] = 'completed' if event.success else 'failed'
                node_info['end_time'] = event.timestamp
                node_info['duration'] = event.node_duration
                # Remove from active
                del self.state.system_state['active_nodes'][node_name]

        elif event.event_type == 'node_phase':
            if node_name in self.state.system_state['active_nodes']:
                self.state.system_state['active_nodes'][node_name]['current_phase'] = event.node_phase
            self.state.system_state['node_phases'][node_name] = event.node_phase

        elif event.event_type == 'error':
            # Errors are appended to 'warnings'; health degrades past 5 errors.
            self.state.system_state['system_health']['error_count'] += 1
            error_detail = {
                'timestamp': event.timestamp,
                'node': node_name,
                'error': event.error_details or 'Unknown error'
            }
            self.state.system_state['system_health']['warnings'].append(error_detail)
            if self.state.system_state['system_health']['error_count'] > 5:
                self.state.system_state['system_health']['status'] = 'degraded'

    def _update_correlations(self, event: ProgressEvent):
        """Update cross-track correlations"""
        # Correlate semantic events with system nodes
        if event.node_name and self._is_progress_track_event(event):
            semantic_key = f"{event.event_type}:{event.timestamp}"
            self.state.correlations['semantic_to_system'][semantic_key] = event.node_name

        # Track timing correlations for performance analysis
        if event.node_duration:
            self.state.correlations['timing_correlations'].append({
                'event_type': event.event_type,
                'node_name': event.node_name,
                'duration': event.node_duration,
                'timestamp': event.timestamp
            })

    def get_progress_summary(self) -> dict[str, Any]:
        """Get comprehensive progress summary across both tracks"""
        current_time = time.time()
        # start_time is an event timestamp; 0 elapsed before any event arrives.
        elapsed = current_time - self.start_time if self.start_time else 0

        return {
            'dual_track_state': {
                # .copy() is shallow — nested dicts are still shared with state.
                'semantic_progress': self.state.semantic_progress.copy(),
                'system_state': self.state.system_state.copy(),
                'correlations_count': {
                    'semantic_to_system': len(self.state.correlations['semantic_to_system']),
                    'timing_data_points': len(self.state.correlations['timing_correlations'])
                }
            },
            'execution_metrics': {
                'total_events': len(self.event_history),
                'elapsed_time': elapsed,
                # max(..., 1) guards division by zero for empty/instant runs.
                'events_per_second': len(self.event_history) / max(elapsed, 1),
                'system_health': self.state.system_state['system_health']['status'],
                'error_rate': self.state.system_state['system_health']['error_count'] / max(len(self.event_history), 1)
            },
            'current_activity': self._get_current_activity_summary()
        }

    def _get_current_activity_summary(self) -> dict[str, Any]:
        """Synthesize current activity from both tracks"""
        semantic = self.state.semantic_progress
        system = self.state.system_state

        return {
            'execution_phase': semantic['execution_phase'],
            'current_outline_step': semantic['outline_progress']['current_step'],
            'total_outline_steps': semantic['outline_progress']['total_steps'],
            'outline_completion_percent': (
                len(semantic['outline_progress']['completed_steps']) /
                max(semantic['outline_progress']['total_steps'], 1) * 100
            ),
            'active_reasoning_loop': semantic['current_reasoning_loop'],
            'active_meta_tools': semantic['active_meta_tools'].copy(),
            'running_tasks': len(semantic['task_execution_state']['running']),
            'current_system_node': system['current_node'],
            'active_system_nodes': len(system['active_nodes']),
            'system_health_status': system['system_health']['status']
        }
get_progress_summary()

Get comprehensive progress summary across both tracks

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
def get_progress_summary(self) -> dict[str, Any]:
    """Get comprehensive progress summary across both tracks"""
    now = time.time()
    # Before the first event arrives there is no start time; report 0 runtime.
    runtime = (now - self.start_time) if self.start_time else 0

    event_count = len(self.event_history)
    health = self.state.system_state['system_health']
    correlations = self.state.correlations

    dual_track = {
        'semantic_progress': self.state.semantic_progress.copy(),
        'system_state': self.state.system_state.copy(),
        'correlations_count': {
            'semantic_to_system': len(correlations['semantic_to_system']),
            'timing_data_points': len(correlations['timing_correlations']),
        },
    }

    # max(..., 1) guards both divisions against zero denominators.
    metrics = {
        'total_events': event_count,
        'elapsed_time': runtime,
        'events_per_second': event_count / max(runtime, 1),
        'system_health': health['status'],
        'error_rate': health['error_count'] / max(event_count, 1),
    }

    return {
        'dual_track_state': dual_track,
        'execution_metrics': metrics,
        'current_activity': self._get_current_activity_summary(),
    }
process_event(event)

Route event to appropriate track processors

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
def process_event(self, event: ProgressEvent):
    """Route event to appropriate track processors"""
    # The first event anchors the session clock.
    if not self.start_time:
        self.start_time = event.timestamp

    self.event_history.append(event)

    # An event may belong to either track, both, or neither; each
    # predicate is checked independently, in progress-then-system order.
    routes = (
        (self._is_progress_track_event, self._process_progress_event),
        (self._is_system_track_event, self._process_system_event),
    )
    for belongs_to_track, handle in routes:
        if belongs_to_track(event):
            handle(event)

    # Cross-track correlations are refreshed after every event.
    self._update_correlations(event)
DualTrackState

Manages the dual-track system: Progress Track + System Track

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
class DualTrackState:
    """Manages the dual-track system: Progress Track + System Track"""

    def __init__(self):
        # --- Progress Track: what the agent is doing semantically ---
        self.semantic_progress = dict(
            execution_phase='starting',  # starting, planning, executing, completing
            current_outline=None,
            outline_progress=dict(current_step=0, total_steps=0, completed_steps=[]),
            current_reasoning_loop=0,
            active_meta_tools=[],
            task_execution_state=dict(total=0, completed=0, failed=0, running=[]),
            llm_interactions=dict(total_calls=0, total_cost=0.0, total_tokens=0),
        )

        # --- System Track: where the agent currently is ---
        self.system_state = dict(
            active_nodes={},
            node_flow=[],
            current_node=None,
            node_phases={},  # node_name -> current phase
            system_health=dict(status='healthy', error_count=0, warnings=[]),
        )

        # --- Cross-track correlations between the two views ---
        self.correlations = dict(
            semantic_to_system={},  # semantic events -> system nodes
            system_to_semantic={},  # system nodes -> semantic events
            timing_correlations=[],
        )
EnhancedDisplayRenderer

Renders dual-track information with intelligent display management

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
class EnhancedDisplayRenderer:
    """Renders dual-track information with intelligent display management.

    One private renderer exists per ``VerbosityMode``;
    :meth:`render_dual_track_display` dispatches to the matching one and
    falls back to a plain ``print``-based display when Rich is unavailable.
    """

    def __init__(self, mode: VerbosityMode, use_rich: bool = True):
        """
        Args:
            mode: Verbosity level that selects the rendering strategy.
            use_rich: Request Rich output; only honored when Rich imported.
        """
        self.mode = mode
        # Rich output is enabled only when requested AND the library is present.
        self.use_rich = use_rich and RICH_AVAILABLE
        self.console = Console() if self.use_rich else None
        self.last_display_hash = None

    def render_dual_track_display(self, processor: DualTrackEventProcessor) -> str:
        """Main rendering method for dual-track display.

        Pulls a fresh summary from *processor* and dispatches on
        ``self.mode``.  Returns the rendered status line (minimal/realtime
        modes) or a short marker string naming the rendered variant.
        """
        summary = processor.get_progress_summary()

        if not self.use_rich:
            return self._render_fallback_display(summary)

        if self.mode == VerbosityMode.MINIMAL:
            return self._render_minimal_display(summary)
        elif self.mode == VerbosityMode.STANDARD:
            return self._render_standard_display(summary)
        elif self.mode == VerbosityMode.VERBOSE:
            return self._render_verbose_display(summary)
        elif self.mode == VerbosityMode.DEBUG:
            return self._render_debug_display(summary)
        elif self.mode == VerbosityMode.REALTIME:
            return self._render_realtime_display(summary)

        # Unknown/unhandled verbosity mode: render nothing rather than fail.
        return ""

    def _render_minimal_display(self, summary: dict[str, Any]) -> str:
        """Minimal display - just essential progress on one status line."""
        activity = summary['current_activity']
        metrics = summary['execution_metrics']

        # Simple status line; step info only when an outline exists.
        phase = activity['execution_phase'].title()
        if activity['total_outline_steps'] > 0:
            progress = f"{activity['outline_completion_percent']:.0f}%"
            status = f"🤖 {phase} | Step {activity['current_outline_step']}/{activity['total_outline_steps']} | {progress}"
        else:
            status = f"🤖 {phase}"

        # Surface the error rate only once it exceeds 10%.
        if metrics['error_rate'] > 0.1:
            status += f" | ⚠️ {metrics['error_rate']:.1%} errors"

        self.console.print(status, style="cyan")
        return status

    def _render_standard_display(self, summary: dict[str, Any]) -> str:
        """Standard display - balanced detail in up to three Rich panels."""
        activity = summary['current_activity']
        semantic = summary['dual_track_state']['semantic_progress']
        system = summary['dual_track_state']['system_state']

        # Main header
        self.console.print()
        header_content = self._build_standard_header(activity, summary['execution_metrics'])
        header_panel = Panel(header_content, title="🤖 Agent Execution Status", style="cyan", box=box.ROUNDED)
        self.console.print(header_panel)

        # Progress overview (only when an outline has been produced)
        if semantic['current_outline']:
            progress_content = self._build_outline_progress_display(semantic['outline_progress'])
            progress_panel = Panel(progress_content, title="📋 Execution Outline", style="blue", box=box.ROUNDED)
            self.console.print(progress_panel)

        # Current activity
        current_activity = self._build_current_activity_display(activity, semantic, system)
        activity_panel = Panel(current_activity, title="🔄 Current Activity", style="green", box=box.ROUNDED)
        self.console.print(activity_panel)

        return "standard_display_rendered"

    def _render_verbose_display(self, summary: dict[str, Any]) -> str:
        """Verbose display - standard view plus system state and metrics."""
        # Render standard display first
        self._render_standard_display(summary)

        # Add detailed system state
        system = summary['dual_track_state']['system_state']
        system_content = self._build_system_state_display(system)
        system_panel = Panel(system_content, title="🔧 System State", style="yellow", box=box.ROUNDED)
        self.console.print(system_panel)

        # Add performance metrics
        metrics_content = self._build_metrics_display(summary['execution_metrics'])
        metrics_panel = Panel(metrics_content, title="📊 Performance Metrics", style="magenta", box=box.ROUNDED)
        self.console.print(metrics_panel)

        return "verbose_display_rendered"

    def _render_debug_display(self, summary: dict[str, Any]) -> str:
        """Debug display - verbose view plus cross-track correlation data."""
        # Render verbose display first
        self._render_verbose_display(summary)

        # Add correlation data
        correlations = summary['dual_track_state']['correlations_count']
        correlation_content = f"Semantic↔System: {correlations['semantic_to_system']} mappings\n"
        correlation_content += f"Timing Data Points: {correlations['timing_data_points']}"

        correlation_panel = Panel(correlation_content, title="🔗 Track Correlations", style="red", box=box.ROUNDED)
        self.console.print(correlation_panel)

        return "debug_display_rendered"

    def _render_realtime_display(self, summary: dict[str, Any]) -> str:
        """Realtime display - single carriage-return status line, live updated."""
        activity = summary['current_activity']

        # Single line live status
        phase = activity['execution_phase']
        if activity['total_outline_steps'] > 0:
            step_info = f"step {activity['current_outline_step']}/{activity['total_outline_steps']}"
        else:
            step_info = "no outline"

        # Animated spinner frame derived from wall-clock time (2 fps).
        spinner_chars = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
        spinner_idx = int(time.time() * 2) % len(spinner_chars)
        spinner = spinner_chars[spinner_idx]

        status_line = f"\r{spinner} 🤖 {phase.title()} | {step_info} | {activity['outline_completion_percent']:.0f}%"

        # Show at most the first two active meta-tools to keep the line short.
        if activity['active_meta_tools']:
            tools_str = ",".join(activity['active_meta_tools'][:2])
            status_line += f" | tools:{tools_str}"

        print(status_line, end="", flush=True)
        return status_line

    def _build_standard_header(self, activity: dict[str, Any], metrics: dict[str, Any]) -> str:
        """Build standard header content: phase, activity and health lines."""
        lines = []

        # Execution phase with progress
        phase_line = f"Phase: {activity['execution_phase'].title()}"
        if activity['total_outline_steps'] > 0:
            phase_line += f" | Progress: {activity['outline_completion_percent']:.1f}%"
        lines.append(phase_line)

        # Current activity (only the parts that are non-zero/non-empty)
        activity_parts = []
        if activity['current_outline_step'] > 0:
            activity_parts.append(f"Step {activity['current_outline_step']}/{activity['total_outline_steps']}")
        if activity['active_reasoning_loop'] > 0:
            activity_parts.append(f"Reasoning Loop {activity['active_reasoning_loop']}")
        if activity['active_meta_tools']:
            activity_parts.append(f"Using: {', '.join(activity['active_meta_tools'][:3])}")

        if activity_parts:
            lines.append("Activity: " + " | ".join(activity_parts))

        # System health
        health_line = f"Health: {activity['system_health_status'].title()}"
        if metrics['error_rate'] > 0:
            health_line += f" | Error Rate: {metrics['error_rate']:.1%}"
        health_line += f" | Runtime: {metrics['elapsed_time']:.1f}s"
        lines.append(health_line)

        return "\n".join(lines)

    def _build_outline_progress_display(self, outline_progress: dict[str, Any]) -> str:
        """Build outline progress visualization, one icon-prefixed line per step."""
        if not outline_progress.get('step_details'):
            return "No outline available"

        lines = []
        current_step = outline_progress['current_step']
        completed_steps = set(outline_progress['completed_steps'])

        for i, step_detail in enumerate(outline_progress['step_details'], 1):
            if isinstance(step_detail, dict):
                description = step_detail.get('description', f'Step {i}')
            else:
                description = str(step_detail)

            # Status icon: completed / in progress / pending.
            # (Removed dead `style` locals that were assigned but never used.)
            if i in completed_steps:
                icon = "✅"
            elif i == current_step:
                icon = "🔄"
            else:
                icon = "⏸️"

            # Truncate long descriptions
            if len(description) > 60:
                description = description[:57] + "..."

            lines.append(f"{icon} Step {i}: {description}")

        return "\n".join(lines)

    def _build_current_activity_display(self, activity: dict[str, Any], semantic: dict[str, Any],
                                        system: dict[str, Any]) -> str:
        """Build current activity summary (node, operations, LLM usage)."""
        lines = []

        # Current focus
        if system['current_node']:
            lines.append(f"🎯 Current Node: {system['current_node']}")

        # Active operations
        active_ops = []
        if activity['active_meta_tools']:
            active_ops.extend(activity['active_meta_tools'])
        if activity['running_tasks'] > 0:
            active_ops.append(f"{activity['running_tasks']} running tasks")

        if active_ops:
            lines.append(f"⚙️ Active Operations: {', '.join(active_ops)}")

        # Resource usage (cost/token lines only when non-zero)
        llm_info = semantic['llm_interactions']
        if llm_info['total_calls'] > 0:
            resource_line = f"💰 LLM: {llm_info['total_calls']} calls"
            if llm_info['total_cost'] > 0:
                resource_line += f", ${llm_info['total_cost']:.4f}"
            if llm_info['total_tokens'] > 0:
                resource_line += f", {llm_info['total_tokens']:,} tokens"
            lines.append(resource_line)

        return "\n".join(lines) if lines else "System initializing..."

    def _build_system_state_display(self, system: dict[str, Any]) -> str:
        """Build detailed system state display (nodes, flow, health)."""
        lines = []

        # Active nodes (capped at five to keep the panel compact)
        if system['active_nodes']:
            lines.append(f"🔄 Active Nodes ({len(system['active_nodes'])}):")
            for node_name, node_info in list(system['active_nodes'].items())[:5]:
                phase = node_info.get('current_phase', 'unknown')
                elapsed = time.time() - node_info.get('start_time', time.time())
                lines.append(f"  • {node_name}: {phase} ({elapsed:.1f}s)")

        # Node execution flow
        if system['node_flow']:
            flow_display = " → ".join(system['node_flow'][-5:])  # Last 5 nodes
            lines.append(f"🔗 Execution Flow: {flow_display}")

        # System health details; warnings are assumed to be dicts with
        # 'timestamp' and 'error' keys (see system track event processing).
        health = system['system_health']
        if health['error_count'] > 0:
            lines.append(f"⚠️ Errors: {health['error_count']}")
            if health['warnings']:
                latest_warning = health['warnings'][-1]
                warning_time = datetime.fromtimestamp(latest_warning['timestamp']).strftime("%H:%M:%S")
                lines.append(f"   Latest: [{warning_time}] {latest_warning['error']}")

        return "\n".join(lines) if lines else "System state nominal"

    def _build_metrics_display(self, metrics: dict[str, Any]) -> str:
        """Build performance metrics display, one emoji-prefixed line each."""
        lines = []

        lines.append(f"📊 Total Events: {metrics['total_events']}")
        lines.append(f"⚡ Processing Rate: {metrics['events_per_second']:.1f} events/sec")
        lines.append(f"⏱️ Runtime: {metrics['elapsed_time']:.2f}s")
        lines.append(f"🏥 System Health: {metrics['system_health']}")

        if metrics['error_rate'] > 0:
            lines.append(f"❌ Error Rate: {metrics['error_rate']:.2%}")

        return "\n".join(lines)

    def _render_fallback_display(self, summary: dict[str, Any]) -> str:
        """Fallback display without Rich - plain print() banner."""
        activity = summary['current_activity']
        metrics = summary['execution_metrics']

        print(f"\n{'=' * 60}")
        print("🤖 AGENT EXECUTION STATUS")
        print(f"{'=' * 60}")
        print(f"Phase: {activity['execution_phase'].title()}")
        if activity['total_outline_steps'] > 0:
            print(
                f"Progress: {activity['outline_completion_percent']:.1f}% (Step {activity['current_outline_step']}/{activity['total_outline_steps']})")
        print(f"Health: {activity['system_health_status'].title()}")
        print(f"Runtime: {metrics['elapsed_time']:.1f}s")
        print(f"Events: {metrics['total_events']} ({metrics['events_per_second']:.1f}/sec)")
        if metrics['error_rate'] > 0:
            print(f"Error Rate: {metrics['error_rate']:.1%}")
        print(f"{'=' * 60}")

        return "fallback_display_rendered"
render_dual_track_display(processor)

Main rendering method for dual-track display

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
def render_dual_track_display(self, processor: DualTrackEventProcessor) -> str:
    """Main rendering method for dual-track display"""
    summary = processor.get_progress_summary()

    # Without Rich there is only one display path.
    if not self.use_rich:
        return self._render_fallback_display(summary)

    # Dispatch table: one renderer per verbosity mode.
    renderers = {
        VerbosityMode.MINIMAL: self._render_minimal_display,
        VerbosityMode.STANDARD: self._render_standard_display,
        VerbosityMode.VERBOSE: self._render_verbose_display,
        VerbosityMode.DEBUG: self._render_debug_display,
        VerbosityMode.REALTIME: self._render_realtime_display,
    }
    renderer = renderers.get(self.mode)
    # Unknown mode renders nothing, matching the original fall-through.
    return renderer(summary) if renderer is not None else ""
ProgressiveTreePrinter

Production-ready progressive tree printer with dual-track event processing

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
 568
 569
 570
 571
 572
 573
 574
 575
 576
 577
 578
 579
 580
 581
 582
 583
 584
 585
 586
 587
 588
 589
 590
 591
 592
 593
 594
 595
 596
 597
 598
 599
 600
 601
 602
 603
 604
 605
 606
 607
 608
 609
 610
 611
 612
 613
 614
 615
 616
 617
 618
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
class ProgressiveTreePrinter:
    """Production-ready progressive tree printer with dual-track event processing"""

    def __init__(self, mode: VerbosityMode = VerbosityMode.STANDARD, use_rich: bool = True,
                 auto_refresh: bool = True, max_history: int = 1000, **kwargs):
        """Create a progressive tree printer.

        Args:
            mode: Verbosity level; determines the display refresh interval.
            use_rich: Request Rich-based rendering. Silently downgraded to
                plain printing when the Rich library is not installed.
            auto_refresh: When True, progress_callback re-renders the
                display as events arrive.
            max_history: Stored for configuration compatibility; event
                history itself lives in the DualTrackEventProcessor.
            **kwargs: Accepted and ignored for forward compatibility.
        """
        self.mode = mode
        # Effective flag: the caller's wish gated by actual availability.
        self.use_rich = use_rich and RICH_AVAILABLE
        self.auto_refresh = auto_refresh
        self.max_history = max_history

        # Initialize dual-track processor
        self.event_processor = DualTrackEventProcessor()
        # BUGFIX: pass the effective flag (self.use_rich) rather than the raw
        # constructor argument, so the renderer never attempts Rich output
        # when the Rich library is unavailable.
        self.display_renderer = EnhancedDisplayRenderer(mode, self.use_rich)

        # Display management
        self._last_display_time = 0
        self._display_interval = self._get_display_interval()
        self._consecutive_errors = 0
        self._error_threshold = 5

        # Session tracking
        self.agent_name = "FlowAgent"
        self.session_id = None
        self._print_counter = 0

        # Accumulated runs tracking
        self._accumulated_runs = []
        self._current_run_id = 0
        self._global_start_time = time.time()

        # Rich console setup — only created when Rich output is active;
        # every console user in this class guards on self.use_rich.
        if self.use_rich:
            self.console = Console(record=True)

    def flush(self, run_name: str = None) -> dict[str, Any]:
        """Enhanced flush with dual-track state management.

        Snapshots the current execution (event history, semantic progress,
        system state, metrics) into a run record, appends it to the
        accumulated-runs list, then resets internal state for a fresh run.

        Args:
            run_name: Optional label for the run; defaults to
                ``"run_<next id>"``.

        Returns:
            The stored run-record dict, or ``{"error": ..., "timestamp": ...}``
            when snapshotting fails (a state reset is still attempted).
        """
        # BUGFIX: take the timestamp before the try-block so the except
        # branch can always reference it — previously a failure on the
        # try's first line would raise UnboundLocalError while building
        # the error payload.
        current_time = time.time()
        try:
            if run_name is None:
                run_name = f"run_{self._current_run_id + 1}"

            # Generate comprehensive run data using dual-track system
            summary = self.event_processor.get_progress_summary()

            # Create comprehensive run data (the .copy() calls guard the
            # record against later mutation of the live processor state)
            run_data = {
                "run_id": self._current_run_id + 1,
                "run_name": run_name,
                "flush_timestamp": current_time,
                "dual_track_summary": summary,
                "execution_events": self.event_processor.event_history.copy(),
                "semantic_progress": summary['dual_track_state']['semantic_progress'].copy(),
                "system_state": summary['dual_track_state']['system_state'].copy(),
                "execution_metrics": summary['execution_metrics'].copy(),
                "current_activity": summary['current_activity'].copy(),
                "print_counter": self._print_counter,
                "agent_name": self.agent_name,
                "session_id": self.session_id
            }

            # Add detailed execution flow analysis
            run_data["execution_analysis"] = {
                "outline_completion_rate": summary['current_activity']['outline_completion_percent'] / 100,
                "reasoning_loops_count": summary['current_activity']['active_reasoning_loop'],
                "system_node_count": len(summary['dual_track_state']['system_state']['node_flow']),
                "error_density": summary['execution_metrics']['error_rate'],
                "processing_efficiency": summary['execution_metrics']['events_per_second']
            }

            # Store in accumulated runs
            self._accumulated_runs.append(run_data)

            # Reset for fresh execution
            self._reset_for_fresh_execution()

            if self.use_rich:
                self.console.print(f"✅ Run '{run_name}' flushed and stored", style="green bold")
                self.console.print(f"📊 Total accumulated runs: {len(self._accumulated_runs)}", style="blue")
            else:
                print(f"✅ Run '{run_name}' flushed and stored")
                print(f"📊 Total accumulated runs: {len(self._accumulated_runs)}")

            return run_data

        except Exception as e:
            error_msg = f"❌ Error during flush: {e}"
            if self.use_rich:
                self.console.print(error_msg, style="red bold")
            else:
                print(error_msg)

            # Still try to reset for fresh execution
            self._reset_for_fresh_execution()
            return {"error": str(e), "timestamp": current_time}

    def print_final_summary(self):
        """Print comprehensive final summary with dual-track analysis"""
        try:
            snapshot = self.event_processor.get_progress_summary()

            # Plain-text environments only get the fallback rendering.
            if not self.use_rich:
                self._print_summary_fallback(snapshot)
                return

            # Completion banner.
            self.console.print()
            self.console.print("🎉 [bold green]EXECUTION COMPLETED[/bold green] 🎉")

            # Final dual-track display followed by the summary table.
            self.display_renderer.render_dual_track_display(self.event_processor)
            self._print_comprehensive_final_table(snapshot)

            # Deeper performance analysis only in the chattier modes.
            if self.mode in (VerbosityMode.VERBOSE, VerbosityMode.DEBUG):
                self._print_dual_track_performance_analysis(snapshot)

        except Exception as e:
            print(f"⚠️ Error printing final summary: {e}")
            self._print_summary_fallback(self.event_processor.get_progress_summary())

    def get_accumulated_summary(self) -> dict[str, Any]:
        """Get comprehensive summary of all accumulated runs with dual-track metrics.

        Aggregates totals and per-run averages (cost, tokens, events, errors,
        duration, outline steps, reasoning loops) across every flushed run,
        and attaches per-run summaries plus heuristic performance insights.

        Returns:
            The summary dict; a ``{"total_runs": 0, ...}`` placeholder when
            nothing has been flushed yet; or ``{"error": ...}`` on failure.
        """
        try:
            if not self._accumulated_runs:
                return {
                    "total_runs": 0,
                    "message": "No runs have been flushed yet"
                }

            # Calculate aggregate metrics
            total_cost = 0.0
            total_tokens = 0
            total_events = 0
            total_errors = 0
            total_duration = 0.0
            total_outline_steps = 0
            total_reasoning_loops = 0

            run_summaries = []

            for run in self._accumulated_runs:
                # Handle both old and new run data formats
                if 'dual_track_summary' in run:
                    # New dual-track format (written by flush() v2)
                    summary = run['dual_track_summary']
                    semantic = summary['dual_track_state']['semantic_progress']
                    metrics = summary['execution_metrics']

                    total_cost += semantic['llm_interactions']['total_cost']
                    total_tokens += semantic['llm_interactions']['total_tokens']
                    total_events += metrics['total_events']
                    total_errors += summary['dual_track_state']['system_state']['system_health']['error_count']
                    total_duration += metrics['elapsed_time']
                    total_outline_steps += semantic['outline_progress']['total_steps']
                    total_reasoning_loops += semantic['current_reasoning_loop']

                    run_summaries.append({
                        "run_id": run["run_id"],
                        "run_name": run["run_name"],
                        "duration": metrics['elapsed_time'],
                        "events": metrics['total_events'],
                        "cost": semantic['llm_interactions']['total_cost'],
                        "tokens": semantic['llm_interactions']['total_tokens'],
                        "errors": summary['dual_track_state']['system_state']['system_health']['error_count'],
                        "outline_completion": summary['current_activity']['outline_completion_percent'],
                        "reasoning_loops": semantic['current_reasoning_loop'],
                        "system_health": summary['current_activity']['system_health_status']
                    })
                else:
                    # Fallback for old format (missing keys default to 0)
                    exec_summary = run.get("execution_summary", {})
                    perf = exec_summary.get("performance_metrics", {})
                    timing = exec_summary.get("timing", {})

                    total_cost += perf.get("total_cost", 0)
                    total_tokens += perf.get("total_tokens", 0)
                    total_events += perf.get("total_events", 0)
                    total_errors += perf.get("error_count", 0)
                    total_duration += timing.get("elapsed", 0)

                    run_summaries.append({
                        "run_id": run["run_id"],
                        "run_name": run["run_name"],
                        "duration": timing.get("elapsed", 0),
                        "events": perf.get("total_events", 0),
                        "cost": perf.get("total_cost", 0),
                        "tokens": perf.get("total_tokens", 0),
                        "errors": perf.get("error_count", 0),
                        "outline_completion": 0,  # Not available in old format
                        "reasoning_loops": 0,  # Not available in old format
                        "system_health": "unknown"
                    })

            # Calculate averages (num_runs >= 1: the empty case returned above)
            num_runs = len(self._accumulated_runs)
            avg_duration = total_duration / num_runs
            avg_cost = total_cost / num_runs
            avg_tokens = total_tokens / num_runs
            avg_events = total_events / num_runs

            return {
                "total_runs": num_runs,
                "current_run_id": self._current_run_id,
                "global_start_time": self._global_start_time,
                "total_accumulated_time": time.time() - self._global_start_time,

                "aggregate_metrics": {
                    "total_cost": total_cost,
                    "total_tokens": total_tokens,
                    "total_events": total_events,
                    "total_errors": total_errors,
                    "total_duration": total_duration,
                    "total_outline_steps": total_outline_steps,
                    "total_reasoning_loops": total_reasoning_loops,
                },

                "average_metrics": {
                    "avg_duration": avg_duration,
                    "avg_cost": avg_cost,
                    "avg_tokens": avg_tokens,
                    "avg_events": avg_events,
                    # max() guards against division by zero when no events were seen
                    "avg_error_rate": total_errors / max(total_events, 1),
                    "avg_outline_completion": sum(r.get("outline_completion", 0) for r in run_summaries) / num_runs,
                    "avg_reasoning_loops": total_reasoning_loops / num_runs
                },

                "run_summaries": run_summaries,
                "performance_insights": self._generate_accumulated_insights(run_summaries)
            }

        except Exception as e:
            return {"error": f"Error generating accumulated summary: {e}"}

    def export_accumulated_data(self, filepath: str = None, extra_data: dict[str, Any] = None) -> str:
        """Export all accumulated run data to a JSON file with dual-track information.

        Args:
            filepath: Target path; defaults to a timestamped
                ``accumulated_execution_data_<YYYYmmdd_HHMMSS>.json``.
            extra_data: Optional extra top-level keys merged into the export
                (may overwrite the standard keys).

        Returns:
            The path written to, or ``""`` when the export failed.
        """
        try:
            if filepath is None:
                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                filepath = f"accumulated_execution_data_{timestamp}.json"

            export_data = {
                "export_timestamp": time.time(),
                "export_version": "2.0",  # Updated version for dual-track
                "printer_config": {
                    "mode": self.mode.value,
                    "use_rich": self.use_rich,
                    "agent_name": self.agent_name
                },
                "accumulated_summary": self.get_accumulated_summary(),
                "all_runs": self._accumulated_runs,
                "dual_track_metadata": {
                    "total_semantic_events": sum(
                        len(run.get('execution_events', [])) for run in self._accumulated_runs
                    ),
                    "total_system_nodes": sum(
                        len(run.get('system_state', {}).get('node_flow', [])) for run in self._accumulated_runs
                    ),
                    "export_features": ["dual_track_processing", "semantic_progress", "system_state"]
                }
            }

            export_data.update(extra_data or {})

            import json
            # BUGFIX: pin the encoding — the platform-default codec could
            # otherwise vary between systems for the same export.
            # NOTE: default=str stringifies any non-JSON-serializable value;
            # this is deliberate so arbitrary event payloads never break export.
            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(export_data, f, indent=2, default=str)

            if self.use_rich:
                self.console.print(f"📁 Accumulated data exported to: {filepath}", style="green bold")
                self.console.print(f"📊 Total runs exported: {len(self._accumulated_runs)}", style="blue")
            else:
                print(f"📁 Accumulated data exported to: {filepath}")
                print(f"📊 Total runs exported: {len(self._accumulated_runs)}")

            return filepath

        except Exception as e:
            error_msg = f"❌ Error exporting accumulated data: {e}"
            if self.use_rich:
                self.console.print(error_msg, style="red bold")
            else:
                print(error_msg)
            return ""

    def _format_cost(self, cost: float) -> str:
        """Render a dollar cost with unit scaling.

        Micro-dollars (μ) below $0.0001, milli-dollars (m) below $0.001,
        four decimals below $1, two decimals otherwise.
        """
        if cost < 0.0001:
            micro = cost * 1000000
            return f"${micro:.1f}μ"
        if cost < 0.001:
            milli = cost * 1000
            return f"${milli:.2f}m"
        # Nested format spec selects the decimal precision by magnitude.
        precision = 4 if cost < 1 else 2
        return f"${cost:.{precision}f}"

    def reset_global_start_time(self):
        """Reset global start time for new session.

        Re-bases the baseline used for ``total_accumulated_time`` in
        get_accumulated_summary().
        """
        self._global_start_time = time.time()

    def _print_accumulated_summary_fallback(self, summary: dict[str, Any]):
        """Fallback accumulated summary without Rich.

        Plain ``print``-based rendering of the dict produced by
        get_accumulated_summary(); used when Rich is unavailable/disabled.

        Args:
            summary: Output of get_accumulated_summary() (must contain
                "aggregate_metrics", "average_metrics" and "run_summaries").
        """
        try:
            print(f"\n{'=' * 80}")
            print("🗂️ ACCUMULATED EXECUTION SUMMARY")
            print(f"{'=' * 80}")

            agg = summary["aggregate_metrics"]
            avg = summary["average_metrics"]

            print(f"Total Runs: {summary['total_runs']}")
            print(f"Total Duration: {agg['total_duration']:.1f}s (avg: {avg['avg_duration']:.1f}s)")
            print(f"Total Events: {agg['total_events']} (avg: {avg['avg_events']:.1f})")

            # Cost/token lines only when any were actually recorded
            if agg["total_cost"] > 0:
                print(f"Total Cost: {self._format_cost(agg['total_cost'])} (avg: {self._format_cost(avg['avg_cost'])})")

            if agg["total_tokens"] > 0:
                print(f"Total Tokens: {agg['total_tokens']:,} (avg: {avg['avg_tokens']:,.0f})")

            # Dual-track specific metrics
            if agg.get("total_outline_steps", 0) > 0:
                print(f"Total Outline Steps: {agg['total_outline_steps']}")
                print(f"Avg Outline Completion: {avg['avg_outline_completion']:.1f}%")

            if agg.get("total_reasoning_loops", 0) > 0:
                print(f"Total Reasoning Loops: {agg['total_reasoning_loops']} (avg: {avg['avg_reasoning_loops']:.1f})")

            print(f"Average Error Rate: {avg['avg_error_rate']:.1%}")

            print(f"\n{'=' * 80}")
            print("🏃 INDIVIDUAL RUNS:")
            print(f"{'=' * 80}")

            # One line per flushed run; old-format runs show "N/A"/"unknown"
            for run in summary["run_summaries"]:
                cost_str = self._format_cost(run["cost"]) if run["cost"] > 0 else "N/A"
                outline_str = f"{run['outline_completion']:.0f}%" if run.get('outline_completion') else "N/A"

                print(f"• {run['run_name']}: {run['duration']:.1f}s | "
                      f"{run['events']} events | Cost: {cost_str} | "
                      f"Outline: {outline_str} | Health: {run.get('system_health', 'unknown')}")

            # Insights
            if summary.get("performance_insights"):
                print("\n🔍 PERFORMANCE INSIGHTS:")
                print(f"{'-' * 40}")
                for insight in summary["performance_insights"]:
                    print(f"• {insight}")

            print(f"{'=' * 80}")

        except Exception as e:
            print(f"❌ Error printing fallback summary: {e}")

    def _generate_accumulated_insights(self, run_summaries: list[dict[str, Any]]) -> list[str]:
        """Generate insights from accumulated run data with dual-track awareness.

        Applies threshold heuristics over the per-run summaries (trend,
        errors, cost, outline completion, reasoning loops, health,
        duration consistency) and returns human-readable insight strings.

        Args:
            run_summaries: "run_summaries" list from get_accumulated_summary().

        Returns:
            List of emoji-prefixed insight strings (possibly empty).
        """
        insights = []

        if not run_summaries:
            return insights

        try:
            num_runs = len(run_summaries)

            # Performance trends: compare the last 3 runs against the rest
            if num_runs > 1:
                recent_runs = run_summaries[-3:]  # Last 3 runs
                older_runs = run_summaries[:-3] if len(run_summaries) > 3 else []

                if older_runs:
                    recent_avg_duration = sum(r["duration"] for r in recent_runs) / len(recent_runs)
                    older_avg_duration = sum(r["duration"] for r in older_runs) / len(older_runs)

                    # 20% faster/slower than the older average triggers a note
                    if recent_avg_duration < older_avg_duration * 0.8:
                        insights.append("🚀 Performance improving: Recent runs 20% faster")
                    elif recent_avg_duration > older_avg_duration * 1.2:
                        insights.append("⚠️ Performance degrading: Recent runs 20% slower")

            # Error patterns
            error_counts = [r["errors"] for r in run_summaries]
            avg_errors = sum(error_counts) / len(error_counts)

            if avg_errors == 0:
                insights.append("✨ Perfect reliability: Zero errors across all runs")
            elif avg_errors < 1:
                insights.append(f"✅ High reliability: {avg_errors:.1f} average errors per run")
            elif avg_errors > 5:
                insights.append(f"🔧 Reliability concerns: {avg_errors:.1f} average errors per run")

            # Cost efficiency (only runs that recorded a non-zero cost)
            costs = [r["cost"] for r in run_summaries if r["cost"] > 0]
            if costs:
                avg_cost = sum(costs) / len(costs)
                if avg_cost < 0.01:
                    insights.append(f"💚 Very cost efficient: {self._format_cost(avg_cost)} average per run")
                elif avg_cost > 0.1:
                    insights.append(f"💸 High cost per run: {self._format_cost(avg_cost)} average")

            # Dual-track specific insights (skips old-format runs with 0)
            outline_completions = [r.get("outline_completion", 0) for r in run_summaries if r.get("outline_completion")]
            if outline_completions:
                avg_completion = sum(outline_completions) / len(outline_completions)
                if avg_completion > 95:
                    insights.append(f"🎯 Excellent outline completion: {avg_completion:.1f}% average")
                elif avg_completion < 80:
                    insights.append(f"📋 Low outline completion: {avg_completion:.1f}% - investigate planning")

            reasoning_loops = [r.get("reasoning_loops", 0) for r in run_summaries if r.get("reasoning_loops")]
            if reasoning_loops:
                avg_loops = sum(reasoning_loops) / len(reasoning_loops)
                if avg_loops > 10:
                    insights.append(f"🧠 High reasoning activity: {avg_loops:.1f} loops average")
                elif avg_loops < 3:
                    insights.append(f"⚡ Efficient reasoning: {avg_loops:.1f} loops average")

            # System health patterns
            health_statuses = [r.get("system_health", "unknown") for r in run_summaries]
            healthy_count = sum(1 for h in health_statuses if h == "healthy")
            if healthy_count == len(health_statuses):
                insights.append("💚 Perfect system health across all runs")
            elif healthy_count / len(health_statuses) < 0.8:
                insights.append("⚠️ System health issues detected in multiple runs")

            # Consistency analysis via coefficient of variation of durations
            durations = [r["duration"] for r in run_summaries]
            if len(durations) > 1:
                import statistics
                duration_std = statistics.stdev(durations)
                duration_mean = statistics.mean(durations)
                cv = duration_std / duration_mean if duration_mean > 0 else 0

                if cv < 0.2:
                    insights.append("🎯 Highly consistent execution times")
                elif cv > 0.5:
                    insights.append("📊 Variable execution times - investigate bottlenecks")

        except Exception as e:
            # Insight generation is best-effort; surface the failure as an insight
            insights.append(f"⚠️ Error generating insights: {e}")

        return insights

    def _reset_for_fresh_execution(self):
        """Reset internal state for a completely fresh execution."""
        try:
            # Advance to the next run number.
            self._current_run_id = self._current_run_id + 1

            # A brand-new dual-track processor discards all prior events.
            self.event_processor = DualTrackEventProcessor()

            # Clear display bookkeeping.
            self._last_display_time = 0
            self._print_counter = 0
            self._consecutive_errors = 0

            # Drop the per-run session marker.
            self.session_id = None

        except Exception as e:
            print(f"⚠️ Error during reset: {e}")

    def _print_comprehensive_final_table(self, summary: dict[str, Any]):
        """Print comprehensive final summary table with dual-track metrics.

        Renders a Rich table of session, progress, system, performance and
        LLM metrics; no-op when Rich output is disabled.

        Args:
            summary: Output of event_processor.get_progress_summary().
        """
        if not self.use_rich:
            return

        table = Table(title="📊 Final Execution Summary", box=box.ROUNDED)
        table.add_column("Category", style="cyan", min_width=15)
        table.add_column("Metric", style="white", min_width=20)
        table.add_column("Value", style="green", min_width=15)

        # Session info
        table.add_row("Session", "Agent Name", self.agent_name)
        table.add_row("", "Session ID", str(self.session_id or "N/A"))
        table.add_row("", "Total Runtime", f"{summary['execution_metrics']['elapsed_time']:.2f}s")

        # Progress track
        semantic = summary['dual_track_state']['semantic_progress']
        activity = summary['current_activity']

        table.add_row("Progress", "Final Phase", activity['execution_phase'].title())
        table.add_row("", "Outline Completion", f"{activity['outline_completion_percent']:.1f}%")
        table.add_row("", "Reasoning Loops", str(semantic['current_reasoning_loop']))

        # System track
        system = summary['dual_track_state']['system_state']
        table.add_row("System", "Nodes Processed", str(len(system['node_flow'])))
        table.add_row("", "System Health", system['system_health']['status'].title())
        table.add_row("", "Error Count", str(system['system_health']['error_count']))

        # Performance
        metrics = summary['execution_metrics']
        table.add_row("Performance", "Total Events", str(metrics['total_events']))
        table.add_row("", "Processing Rate", f"{metrics['events_per_second']:.1f} events/sec")
        table.add_row("", "Error Rate", f"{metrics['error_rate']:.1%}")

        # LLM metrics (section only when at least one call was made)
        llm = semantic['llm_interactions']
        if llm['total_calls'] > 0:
            table.add_row("LLM", "Total Calls", str(llm['total_calls']))
            if llm['total_cost'] > 0:
                table.add_row("", "Total Cost", self._format_cost(llm['total_cost']))
            if llm['total_tokens'] > 0:
                table.add_row("", "Total Tokens", f"{llm['total_tokens']:,}")

        self.console.print()
        self.console.print(table)

    def _print_dual_track_performance_analysis(self, summary: dict[str, Any]):
        """Print performance analysis with dual-track insights.

        Derives threshold-based insight strings from the progress and system
        tracks and shows them in a Rich panel; no-op without Rich.

        Args:
            summary: Output of event_processor.get_progress_summary().
        """
        if not self.use_rich:
            return

        insights = []

        # Progress track analysis
        activity = summary['current_activity']
        semantic = summary['dual_track_state']['semantic_progress']

        if activity['outline_completion_percent'] > 95:
            insights.append("✨ Excellent outline completion")
        elif activity['outline_completion_percent'] < 80:
            insights.append("⚠️ Low outline completion - planning may need improvement")

        if semantic['current_reasoning_loop'] > 10:
            insights.append("🧠 High reasoning activity - complex problem solving")
        elif semantic['current_reasoning_loop'] < 3:
            insights.append("⚡ Efficient reasoning - straightforward execution")

        # System track analysis
        system = summary['dual_track_state']['system_state']
        metrics = summary['execution_metrics']

        if metrics['events_per_second'] > 10:
            insights.append("🚀 High processing efficiency")
        elif metrics['events_per_second'] < 2:
            insights.append("🐌 Low processing rate - possible bottlenecks")

        if system['system_health']['status'] == 'healthy':
            insights.append("💚 Perfect system health")
        else:
            insights.append("🔧 System health issues detected")

        # Cross-track analysis: all three conditions must hold
        if (activity['outline_completion_percent'] > 90 and
            system['system_health']['status'] == 'healthy' and
            metrics['error_rate'] < 0.1):
            insights.append("🏆 Optimal execution across all tracks")

        if insights:
            analysis_panel = Panel(
                "\n".join(f"• {insight}" for insight in insights),
                title="🔍 Dual-Track Performance Analysis",
                style="yellow"
            )
            self.console.print()
            self.console.print(analysis_panel)

    def print_accumulated_summary(self):
        """Print comprehensive summary of all accumulated runs.

        Renders aggregate metrics, per-run rows and performance insights via
        Rich tables/panels, falling back to plain print when Rich is off.
        """
        try:
            summary = self.get_accumulated_summary()

            if summary.get("total_runs", 0) == 0:
                if self.use_rich:
                    self.console.print("📊 No accumulated runs to display", style="yellow")
                else:
                    print("📊 No accumulated runs to display")
                return

            if not self.use_rich:
                self._print_accumulated_summary_fallback(summary)
                return

            # Rich formatted output
            self.console.print()
            self.console.print("🗂️ [bold cyan]ACCUMULATED EXECUTION SUMMARY[/bold cyan] 🗂️")

            # Overview table with dual-track metrics
            overview_table = Table(title="📊 Aggregate Overview", box=box.ROUNDED)
            overview_table.add_column("Metric", style="cyan", min_width=25)
            overview_table.add_column("Total", style="green", min_width=15)
            overview_table.add_column("Average", style="blue", min_width=15)

            agg = summary["aggregate_metrics"]
            avg = summary["average_metrics"]

            overview_table.add_row("Runs", str(summary["total_runs"]), "")
            overview_table.add_row("Duration", f"{agg['total_duration']:.1f}s", f"{avg['avg_duration']:.1f}s")
            overview_table.add_row("Events", str(agg["total_events"]), f"{avg['avg_events']:.1f}")

            # Cost/token rows only when any were recorded
            if agg["total_cost"] > 0:
                overview_table.add_row("Cost", self._format_cost(agg["total_cost"]), self._format_cost(avg["avg_cost"]))

            if agg["total_tokens"] > 0:
                overview_table.add_row("Tokens", f"{agg['total_tokens']:,}", f"{avg['avg_tokens']:,.0f}")

            # Dual-track specific metrics
            if agg.get("total_outline_steps", 0) > 0:
                overview_table.add_row("Outline Steps", str(agg["total_outline_steps"]), "")
                overview_table.add_row("Outline Completion", "", f"{avg['avg_outline_completion']:.1f}%")

            if agg.get("total_reasoning_loops", 0) > 0:
                overview_table.add_row("Reasoning Loops", str(agg["total_reasoning_loops"]),
                                       f"{avg['avg_reasoning_loops']:.1f}")

            overview_table.add_row("Error Rate", "", f"{avg['avg_error_rate']:.1%}")

            self.console.print(overview_table)

            # Individual runs table
            runs_table = Table(title="🏃 Individual Runs", box=box.ROUNDED)
            runs_table.add_column("Run", style="cyan")
            runs_table.add_column("Duration", style="blue")
            runs_table.add_column("Events", style="green")
            runs_table.add_column("Cost", style="yellow")
            runs_table.add_column("Outline", style="magenta")
            runs_table.add_column("Health", style="white")

            # Old-format runs lack cost/outline/health and render "-"/"N/A"/"unknown"
            for run in summary["run_summaries"]:
                cost_str = self._format_cost(run["cost"]) if run["cost"] > 0 else "-"
                outline_str = f"{run.get('outline_completion', 0):.0f}%" if run.get('outline_completion') else "N/A"
                health_str = run.get('system_health', 'unknown')

                runs_table.add_row(
                    run["run_name"],
                    f"{run['duration']:.1f}s",
                    str(run['events']),
                    cost_str,
                    outline_str,
                    health_str
                )

            self.console.print(runs_table)

            # Insights
            if summary.get("performance_insights"):
                insights_panel = Panel(
                    "\n".join(f"• {insight}" for insight in summary["performance_insights"]),
                    title="🔍 Performance Insights",
                    style="yellow"
                )
                self.console.print(insights_panel)

        except Exception as e:
            error_msg = f"❌ Error printing accumulated summary: {e}"
            if self.use_rich:
                self.console.print(error_msg, style="red bold")
            else:
                print(error_msg)

    def _get_display_interval(self) -> float:
        """Get appropriate display update interval based on mode.

        Returns the number of seconds between display refreshes; unknown
        modes fall back to 1.0s.
        """
        return {
            VerbosityMode.MINIMAL: 2.0,
            VerbosityMode.STANDARD: 1.0,
            VerbosityMode.VERBOSE: 0.5,
            VerbosityMode.DEBUG: 0.1,
            VerbosityMode.REALTIME: 0.2,
        }.get(self.mode, 1.0)

    async def progress_callback(self, event: ProgressEvent):
        """Enhanced progress callback with dual-track processing.

        Feeds the event into the dual-track processor and refreshes the
        display when due; after too many consecutive failures the callback
        disables itself by rebinding to the no-op handler.
        """
        try:
            # Keep agent/session identity in sync with the event stream.
            if event.agent_name:
                self.agent_name = event.agent_name
            if event.session_id:
                self.session_id = event.session_id

            # Process through dual-track system.
            self.event_processor.process_event(event)

            # Refresh when the interval has elapsed or the event is important.
            interval_elapsed = (
                time.time() - self._last_display_time >= self._display_interval
            )
            if self.auto_refresh and (interval_elapsed or self._is_important_event(event)):
                self._update_display()
                self._last_display_time = time.time()

        except Exception as e:
            self._consecutive_errors += 1
            if self._consecutive_errors <= self._error_threshold:
                print(f"⚠️ Progress callback error #{self._consecutive_errors}: {e}")
            if self._consecutive_errors > self._error_threshold:
                print("🚨 Progress printing disabled due to excessive errors")
                self.progress_callback = self._noop_callback

    def _is_important_event(self, event: ProgressEvent) -> bool:
        """Determine if event requires immediate display update.

        Lifecycle boundaries, planning milestones, errors and any event
        explicitly flagged unsuccessful bypass the refresh interval.
        """
        if event.success is False:
            return True
        return event.event_type in {
            'execution_start', 'execution_complete',
            'outline_created', 'plan_created',
            'error', 'task_error',
        }

    def _update_display(self):
        """Update the display using the dual-track renderer."""
        try:
            self._print_counter = self._print_counter + 1
            self.display_renderer.render_dual_track_display(self.event_processor)
            # A successful render clears the consecutive-error streak.
            self._consecutive_errors = 0

        except Exception as e:
            self._consecutive_errors += 1
            print(f"⚠️ Display update error: {e}")

    def print_execution_summary(self):
        """Print comprehensive execution summary"""
        try:
            snapshot = self.event_processor.get_progress_summary()

            # Plain-text environments only get the fallback rendering.
            if not self.use_rich:
                self._print_summary_fallback(snapshot)
                return

            console = self.display_renderer.console
            console.print()
            console.print("🎉 [bold green]EXECUTION SUMMARY[/bold green] 🎉")

            # Final status display followed by the detailed metrics table.
            self.display_renderer.render_dual_track_display(self.event_processor)
            self._print_detailed_metrics_table(snapshot)

        except Exception as e:
            print(f"⚠️ Error printing execution summary: {e}")
            self._print_summary_fallback(self.event_processor.get_progress_summary())

    def _print_detailed_metrics_table(self, summary: dict[str, Any]):
        """Print the detailed metrics table (Rich only; silently no-op otherwise)."""
        if not self.use_rich:
            return

        semantic = summary['dual_track_state']['semantic_progress']
        system = summary['dual_track_state']['system_state']
        metrics = summary['execution_metrics']
        llm = semantic['llm_interactions']
        outline = semantic['outline_progress']

        # Build all rows first; a blank category continues the previous group.
        rows = [
            ("Progress", "Execution Phase", semantic['execution_phase'].title()),
            ("", "Outline Steps",
             f"{len(outline['completed_steps'])}/{outline['total_steps']}"),
            ("", "Reasoning Loops", str(semantic['current_reasoning_loop'])),
            ("System", "Node Flow Length", str(len(system['node_flow']))),
            ("", "System Health", system['system_health']['status'].title()),
            ("", "Error Count", str(system['system_health']['error_count'])),
            ("Performance", "Total Events", str(metrics['total_events'])),
            ("", "Runtime", f"{metrics['elapsed_time']:.2f}s"),
            ("", "Events/sec", f"{metrics['events_per_second']:.1f}"),
        ]

        # LLM rows only when any calls happened; cost/token rows only when non-zero.
        if llm['total_calls'] > 0:
            rows.append(("LLM", "Total Calls", str(llm['total_calls'])))
            if llm['total_cost'] > 0:
                rows.append(("", "Total Cost", f"${llm['total_cost']:.4f}"))
            if llm['total_tokens'] > 0:
                rows.append(("", "Total Tokens", f"{llm['total_tokens']:,}"))

        table = Table(title="📊 Execution Metrics", box=box.ROUNDED)
        table.add_column("Category", style="cyan")
        table.add_column("Metric", style="white")
        table.add_column("Value", style="green")
        for category, metric, value in rows:
            table.add_row(category, metric, value)

        self.display_renderer.console.print()
        self.display_renderer.console.print(table)

    def _print_summary_fallback(self, summary: dict[str, Any]):
        """Plain-text execution summary for environments without Rich."""
        activity = summary['current_activity']
        metrics = summary['execution_metrics']
        semantic = summary['dual_track_state']['semantic_progress']
        divider = '=' * 60

        print(f"\n{divider}")
        print("🎉 EXECUTION SUMMARY")
        print(f"{divider}")
        print(f"Agent: {self.agent_name}")
        print(f"Session: {self.session_id or 'N/A'}")
        print(f"Final Phase: {activity['execution_phase'].title()}")

        outline = semantic['outline_progress']
        if outline['total_steps'] > 0:
            print(
                f"Outline Progress: {len(outline['completed_steps'])}/{outline['total_steps']}"
                f" steps ({activity['outline_completion_percent']:.1f}%)")

        print(f"Total Runtime: {metrics['elapsed_time']:.2f}s")
        print(f"Total Events: {metrics['total_events']}")
        print(f"Processing Rate: {metrics['events_per_second']:.1f} events/sec")
        print(f"System Health: {activity['system_health_status'].title()}")

        # Error rate is omitted entirely for clean runs.
        if metrics['error_rate'] > 0:
            print(f"Error Rate: {metrics['error_rate']:.2%}")

        llm = semantic['llm_interactions']
        if llm['total_calls'] > 0:
            print(f"LLM Calls: {llm['total_calls']}")
            if llm['total_cost'] > 0:
                print(f"LLM Cost: ${llm['total_cost']:.4f}")

        print(f"{divider}")

    # Accumulation/export helpers — flush, get_accumulated_summary,
    # export_accumulated_data, etc. — are defined elsewhere in this class.

    async def _noop_callback(self, event: ProgressEvent):
        """Do nothing; installed in place of progress_callback once printing is disabled."""
        return None

    def get_current_execution_state(self) -> dict[str, Any]:
        """Expose the live progress summary so external monitors can poll it."""
        summary = self.event_processor.get_progress_summary()
        return summary

    def force_display_update(self):
        """Force an immediate display update, bypassing the refresh-interval throttle."""
        self._update_display()

    def set_display_mode(self, mode: VerbosityMode):
        """Change display mode at runtime.

        Stores the new mode, rebuilds the renderer for it, and recomputes the
        refresh interval (which depends on the mode just stored).
        """
        self.mode = mode
        self.display_renderer = EnhancedDisplayRenderer(mode, self.use_rich)
        self._display_interval = self._get_display_interval()
export_accumulated_data(filepath=None, extra_data=None)

Export all accumulated run data to file with dual-track information

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
def export_accumulated_data(self, filepath: str = None, extra_data: dict[str, Any] = None) -> str:
    """Export all accumulated run data to file with dual-track information.

    Args:
        filepath: Destination JSON path; a timestamped name is generated when None.
        extra_data: Optional extra top-level keys merged into the export payload.

    Returns:
        The path written to, or "" when the export failed.
    """
    try:
        if filepath is None:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filepath = f"accumulated_execution_data_{timestamp}.json"

        export_data = {
            "export_timestamp": time.time(),
            "export_version": "2.0",  # Updated version for dual-track
            "printer_config": {
                "mode": self.mode.value,
                "use_rich": self.use_rich,
                "agent_name": self.agent_name
            },
            "accumulated_summary": self.get_accumulated_summary(),
            "all_runs": self._accumulated_runs,
            "dual_track_metadata": {
                "total_semantic_events": sum(
                    len(run.get('execution_events', [])) for run in self._accumulated_runs
                ),
                "total_system_nodes": sum(
                    len(run.get('system_state', {}).get('node_flow', [])) for run in self._accumulated_runs
                ),
                "export_features": ["dual_track_processing", "semantic_progress", "system_state"]
            }
        }

        # Caller-supplied keys win over the defaults above.
        export_data.update(extra_data or {})

        import json
        # Explicit UTF-8 keeps the output identical across platforms (the
        # platform default encoding was used before); default=str stringifies
        # datetimes and other non-JSON-serializable values.
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(export_data, f, indent=2, default=str)

        if self.use_rich:
            self.console.print(f"📁 Accumulated data exported to: {filepath}", style="green bold")
            self.console.print(f"📊 Total runs exported: {len(self._accumulated_runs)}", style="blue")
        else:
            print(f"📁 Accumulated data exported to: {filepath}")
            print(f"📊 Total runs exported: {len(self._accumulated_runs)}")

        return filepath

    except Exception as e:
        error_msg = f"❌ Error exporting accumulated data: {e}"
        if self.use_rich:
            self.console.print(error_msg, style="red bold")
        else:
            print(error_msg)
        return ""
flush(run_name=None)

Enhanced flush with dual-track state management

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
def flush(self, run_name: str = None) -> dict[str, Any]:
    """Enhanced flush with dual-track state management.

    Snapshots the current dual-track execution state into the accumulated
    run list, then resets the printer for a fresh execution.

    Args:
        run_name: Label stored with the run; auto-numbered when None.

    Returns:
        The stored run-data dict, or an error dict when flushing failed.
    """
    # Taken before the try block so the error path below can always reference
    # it (previously a failure before the assignment raised NameError in
    # the `except` return).
    current_time = time.time()
    try:
        if run_name is None:
            # NOTE(review): _current_run_id is presumably advanced by
            # _reset_for_fresh_execution — confirm.
            run_name = f"run_{self._current_run_id + 1}"

        # Generate comprehensive run data using dual-track system
        summary = self.event_processor.get_progress_summary()

        # Create comprehensive run data
        run_data = {
            "run_id": self._current_run_id + 1,
            "run_name": run_name,
            "flush_timestamp": current_time,
            "dual_track_summary": summary,
            "execution_events": self.event_processor.event_history.copy(),
            "semantic_progress": summary['dual_track_state']['semantic_progress'].copy(),
            "system_state": summary['dual_track_state']['system_state'].copy(),
            "execution_metrics": summary['execution_metrics'].copy(),
            "current_activity": summary['current_activity'].copy(),
            "print_counter": self._print_counter,
            "agent_name": self.agent_name,
            "session_id": self.session_id
        }

        # Add detailed execution flow analysis
        run_data["execution_analysis"] = {
            "outline_completion_rate": summary['current_activity']['outline_completion_percent'] / 100,
            "reasoning_loops_count": summary['current_activity']['active_reasoning_loop'],
            "system_node_count": len(summary['dual_track_state']['system_state']['node_flow']),
            "error_density": summary['execution_metrics']['error_rate'],
            "processing_efficiency": summary['execution_metrics']['events_per_second']
        }

        # Store in accumulated runs
        self._accumulated_runs.append(run_data)

        # Reset for fresh execution
        self._reset_for_fresh_execution()

        if self.use_rich:
            self.console.print(f"✅ Run '{run_name}' flushed and stored", style="green bold")
            self.console.print(f"📊 Total accumulated runs: {len(self._accumulated_runs)}", style="blue")
        else:
            print(f"✅ Run '{run_name}' flushed and stored")
            print(f"📊 Total accumulated runs: {len(self._accumulated_runs)}")

        return run_data

    except Exception as e:
        error_msg = f"❌ Error during flush: {e}"
        if self.use_rich:
            self.console.print(error_msg, style="red bold")
        else:
            print(error_msg)

        # Still try to reset for fresh execution
        self._reset_for_fresh_execution()
        return {"error": str(e), "timestamp": current_time}
force_display_update()

Force an immediate display update

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
1390
1391
1392
def force_display_update(self):
    """Force an immediate display update, bypassing the refresh-interval throttle."""
    self._update_display()
get_accumulated_summary()

Get comprehensive summary of all accumulated runs with dual-track metrics

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
def get_accumulated_summary(self) -> dict[str, Any]:
    """Get comprehensive summary of all accumulated runs with dual-track metrics.

    Aggregates totals and averages over every flushed run and builds a
    per-run summary list.  Supports both the current dual-track run format
    (runs carrying a 'dual_track_summary' key) and the legacy format
    (runs carrying 'execution_summary').

    Returns:
        A dict with 'aggregate_metrics', 'average_metrics', 'run_summaries'
        and 'performance_insights'; a placeholder dict when no runs have been
        flushed; or an {'error': ...} dict if summarization failed.
    """
    try:
        if not self._accumulated_runs:
            return {
                "total_runs": 0,
                "message": "No runs have been flushed yet"
            }

        # Calculate aggregate metrics
        total_cost = 0.0
        total_tokens = 0
        total_events = 0
        total_errors = 0
        total_duration = 0.0
        total_outline_steps = 0
        total_reasoning_loops = 0

        run_summaries = []

        for run in self._accumulated_runs:
            # Handle both old and new run data formats
            if 'dual_track_summary' in run:
                # New dual-track format
                summary = run['dual_track_summary']
                semantic = summary['dual_track_state']['semantic_progress']
                metrics = summary['execution_metrics']

                total_cost += semantic['llm_interactions']['total_cost']
                total_tokens += semantic['llm_interactions']['total_tokens']
                total_events += metrics['total_events']
                total_errors += summary['dual_track_state']['system_state']['system_health']['error_count']
                total_duration += metrics['elapsed_time']
                total_outline_steps += semantic['outline_progress']['total_steps']
                total_reasoning_loops += semantic['current_reasoning_loop']

                run_summaries.append({
                    "run_id": run["run_id"],
                    "run_name": run["run_name"],
                    "duration": metrics['elapsed_time'],
                    "events": metrics['total_events'],
                    "cost": semantic['llm_interactions']['total_cost'],
                    "tokens": semantic['llm_interactions']['total_tokens'],
                    "errors": summary['dual_track_state']['system_state']['system_health']['error_count'],
                    "outline_completion": summary['current_activity']['outline_completion_percent'],
                    "reasoning_loops": semantic['current_reasoning_loop'],
                    "system_health": summary['current_activity']['system_health_status']
                })
            else:
                # Fallback for old format
                exec_summary = run.get("execution_summary", {})
                perf = exec_summary.get("performance_metrics", {})
                timing = exec_summary.get("timing", {})

                total_cost += perf.get("total_cost", 0)
                total_tokens += perf.get("total_tokens", 0)
                total_events += perf.get("total_events", 0)
                total_errors += perf.get("error_count", 0)
                total_duration += timing.get("elapsed", 0)

                run_summaries.append({
                    "run_id": run["run_id"],
                    "run_name": run["run_name"],
                    "duration": timing.get("elapsed", 0),
                    "events": perf.get("total_events", 0),
                    "cost": perf.get("total_cost", 0),
                    "tokens": perf.get("total_tokens", 0),
                    "errors": perf.get("error_count", 0),
                    "outline_completion": 0,  # Not available in old format
                    "reasoning_loops": 0,  # Not available in old format
                    "system_health": "unknown"
                })

        # Calculate averages
        # num_runs is >= 1 here thanks to the early return above, so the
        # divisions below cannot raise ZeroDivisionError.
        num_runs = len(self._accumulated_runs)
        avg_duration = total_duration / num_runs
        avg_cost = total_cost / num_runs
        avg_tokens = total_tokens / num_runs
        avg_events = total_events / num_runs

        return {
            "total_runs": num_runs,
            "current_run_id": self._current_run_id,
            "global_start_time": self._global_start_time,
            "total_accumulated_time": time.time() - self._global_start_time,

            "aggregate_metrics": {
                "total_cost": total_cost,
                "total_tokens": total_tokens,
                "total_events": total_events,
                "total_errors": total_errors,
                "total_duration": total_duration,
                "total_outline_steps": total_outline_steps,
                "total_reasoning_loops": total_reasoning_loops,
            },

            "average_metrics": {
                "avg_duration": avg_duration,
                "avg_cost": avg_cost,
                "avg_tokens": avg_tokens,
                "avg_events": avg_events,
                "avg_error_rate": total_errors / max(total_events, 1),
                "avg_outline_completion": sum(r.get("outline_completion", 0) for r in run_summaries) / num_runs,
                "avg_reasoning_loops": total_reasoning_loops / num_runs
            },

            "run_summaries": run_summaries,
            "performance_insights": self._generate_accumulated_insights(run_summaries)
        }

    except Exception as e:
        return {"error": f"Error generating accumulated summary: {e}"}
get_current_execution_state()

Get current execution state for external monitoring

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
1386
1387
1388
def get_current_execution_state(self) -> dict[str, Any]:
    """Expose the live progress summary so external monitors can poll it."""
    summary = self.event_processor.get_progress_summary()
    return summary
print_accumulated_summary()

Print comprehensive summary of all accumulated runs

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
def print_accumulated_summary(self):
    """Print comprehensive summary of all accumulated runs.

    Renders an aggregate-overview table, a per-run table and (when present)
    a performance-insights panel via Rich; falls back to plain text when
    Rich is unavailable, and prints a short notice when no runs exist.
    """
    try:
        summary = self.get_accumulated_summary()

        if summary.get("total_runs", 0) == 0:
            if self.use_rich:
                self.console.print("📊 No accumulated runs to display", style="yellow")
            else:
                print("📊 No accumulated runs to display")
            return

        if not self.use_rich:
            self._print_accumulated_summary_fallback(summary)
            return

        # Rich formatted output
        self.console.print()
        self.console.print("🗂️ [bold cyan]ACCUMULATED EXECUTION SUMMARY[/bold cyan] 🗂️")

        # Overview table with dual-track metrics
        overview_table = Table(title="📊 Aggregate Overview", box=box.ROUNDED)
        overview_table.add_column("Metric", style="cyan", min_width=25)
        overview_table.add_column("Total", style="green", min_width=15)
        overview_table.add_column("Average", style="blue", min_width=15)

        agg = summary["aggregate_metrics"]
        avg = summary["average_metrics"]

        overview_table.add_row("Runs", str(summary["total_runs"]), "")
        overview_table.add_row("Duration", f"{agg['total_duration']:.1f}s", f"{avg['avg_duration']:.1f}s")
        overview_table.add_row("Events", str(agg["total_events"]), f"{avg['avg_events']:.1f}")

        # Cost/token rows are suppressed for free/local runs.
        if agg["total_cost"] > 0:
            overview_table.add_row("Cost", self._format_cost(agg["total_cost"]), self._format_cost(avg["avg_cost"]))

        if agg["total_tokens"] > 0:
            overview_table.add_row("Tokens", f"{agg['total_tokens']:,}", f"{avg['avg_tokens']:,.0f}")

        # Dual-track specific metrics
        if agg.get("total_outline_steps", 0) > 0:
            overview_table.add_row("Outline Steps", str(agg["total_outline_steps"]), "")
            overview_table.add_row("Outline Completion", "", f"{avg['avg_outline_completion']:.1f}%")

        if agg.get("total_reasoning_loops", 0) > 0:
            overview_table.add_row("Reasoning Loops", str(agg["total_reasoning_loops"]),
                                   f"{avg['avg_reasoning_loops']:.1f}")

        overview_table.add_row("Error Rate", "", f"{avg['avg_error_rate']:.1%}")

        self.console.print(overview_table)

        # Individual runs table
        runs_table = Table(title="🏃 Individual Runs", box=box.ROUNDED)
        runs_table.add_column("Run", style="cyan")
        runs_table.add_column("Duration", style="blue")
        runs_table.add_column("Events", style="green")
        runs_table.add_column("Cost", style="yellow")
        runs_table.add_column("Outline", style="magenta")
        runs_table.add_column("Health", style="white")

        for run in summary["run_summaries"]:
            cost_str = self._format_cost(run["cost"]) if run["cost"] > 0 else "-"
            # Legacy-format runs report outline_completion 0 and show "N/A".
            outline_str = f"{run.get('outline_completion', 0):.0f}%" if run.get('outline_completion') else "N/A"
            health_str = run.get('system_health', 'unknown')

            runs_table.add_row(
                run["run_name"],
                f"{run['duration']:.1f}s",
                str(run['events']),
                cost_str,
                outline_str,
                health_str
            )

        self.console.print(runs_table)

        # Insights
        if summary.get("performance_insights"):
            insights_panel = Panel(
                "\n".join(f"• {insight}" for insight in summary["performance_insights"]),
                title="🔍 Performance Insights",
                style="yellow"
            )
            self.console.print(insights_panel)

    except Exception as e:
        error_msg = f"❌ Error printing accumulated summary: {e}"
        if self.use_rich:
            self.console.print(error_msg, style="red bold")
        else:
            print(error_msg)
print_execution_summary()

Print comprehensive execution summary

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
def print_execution_summary(self):
    """Print a full end-of-run summary (Rich when available, plain otherwise)."""
    try:
        summary = self.event_processor.get_progress_summary()

        if not self.use_rich:
            self._print_summary_fallback(summary)
            return

        console = self.display_renderer.console
        console.print()
        console.print("🎉 [bold green]EXECUTION SUMMARY[/bold green] 🎉")

        # Show the final dual-track state before the metric breakdown.
        self.display_renderer.render_dual_track_display(self.event_processor)
        self._print_detailed_metrics_table(summary)

    except Exception as e:
        print(f"⚠️ Error printing execution summary: {e}")
        self._print_summary_fallback(self.event_processor.get_progress_summary())
print_final_summary()

Print comprehensive final summary with dual-track analysis

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
def print_final_summary(self):
    """Print comprehensive final summary with dual-track analysis."""
    try:
        if not self.use_rich:
            self._print_summary_fallback(self.event_processor.get_progress_summary())
            return

        summary = self.event_processor.get_progress_summary()

        # Announce completion, then render the final dual-track state.
        self.console.print()
        self.console.print("🎉 [bold green]EXECUTION COMPLETED[/bold green] 🎉")

        self.display_renderer.render_dual_track_display(self.event_processor)
        self._print_comprehensive_final_table(summary)

        # Deep performance breakdown only in the chattier modes.
        if self.mode in (VerbosityMode.VERBOSE, VerbosityMode.DEBUG):
            self._print_dual_track_performance_analysis(summary)

    except Exception as e:
        print(f"⚠️ Error printing final summary: {e}")
        self._print_summary_fallback(self.event_processor.get_progress_summary())
progress_callback(event) async

Enhanced progress callback with dual-track processing

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
async def progress_callback(self, event: ProgressEvent):
    """Enhanced progress callback with dual-track processing."""
    try:
        # Keep the agent/session labels current from the incoming event.
        if event.agent_name:
            self.agent_name = event.agent_name
        if event.session_id:
            self.session_id = event.session_id

        self.event_processor.process_event(event)

        # Refresh either when the throttle interval has elapsed or for
        # milestone/error events that must be shown immediately.
        interval_elapsed = time.time() - self._last_display_time >= self._display_interval
        if self.auto_refresh and (interval_elapsed or self._is_important_event(event)):
            self._update_display()
            self._last_display_time = time.time()

    except Exception as exc:
        self._consecutive_errors += 1
        if self._consecutive_errors <= self._error_threshold:
            print(f"⚠️ Progress callback error #{self._consecutive_errors}: {exc}")
        else:
            # Too many failures: replace ourselves with a no-op callback.
            print("🚨 Progress printing disabled due to excessive errors")
            self.progress_callback = self._noop_callback
reset_global_start_time()

Reset global start time for new session

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
865
866
867
def reset_global_start_time(self):
    """Reset global start time for new session.

    Subsequent accumulated-time figures are measured from this instant.
    """
    self._global_start_time = time.time()
set_display_mode(mode)

Change display mode at runtime

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
1394
1395
1396
1397
1398
def set_display_mode(self, mode: VerbosityMode):
    """Change display mode at runtime.

    Stores the new mode, rebuilds the renderer for it, and recomputes the
    refresh interval (which depends on the mode just stored).
    """
    self.mode = mode
    self.display_renderer = EnhancedDisplayRenderer(mode, self.use_rich)
    self._display_interval = self._get_display_interval()
create_complex_scenario() async

Create a complex scenario with multiple nodes and error recovery

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
async def create_complex_scenario():
    """Create a complex scenario with multiple nodes and error recovery.

    Builds a synthetic ProgressEvent stream (demo/test fixture): an
    execution start, then one enter/operation/completion cycle per node —
    alternating tool calls (one deliberately failing on ValidationNode,
    followed by a recovery tool call) and LLM calls — and a final
    execution_complete event.

    Returns:
        list of ProgressEvent objects with monotonically increasing timestamps.
    """
    base_time = time.time()
    events = []

    nodes = [
        "FlowAgent",
        "StrategyOrchestratorNode",
        "TaskPlannerFlow",
        "ResearchNode",
        "AnalysisNode",
        "ValidationNode",
        "ResponseGeneratorNode"
    ]

    # Start execution
    events.append(ProgressEvent(
        event_type="execution_start",
        timestamp=base_time,
        node_name="FlowAgent",
        session_id=f"complex_session_{int(base_time)}",
        metadata={"complexity": "high", "estimated_duration": 25}
    ))

    current_time = base_time

    # Skip nodes[0] (FlowAgent) — it only frames the run start/end events.
    for i, node in enumerate(nodes[1:], 1):
        # Node entry
        current_time += 0.5
        events.append(ProgressEvent(
            event_type="node_enter",
            timestamp=current_time,
            node_name=node
        ))

        # Main operation (LLM or tool call)
        current_time += 1.2
        if i % 3 == 0:  # Tool call
            success = i != 5  # Fail on ValidationNode
            events.append(ProgressEvent(
                event_type="tool_call",
                timestamp=current_time,
                node_name=node,
                tool_name=f"tool_{i}",
                tool_duration=1.8,
                tool_success=success,
                tool_result=f"Tool result {i}" if success else None,
                tool_error=f"Tool error {i}" if not success else None,
                success=success,
                metadata={"error": "Validation failed", "error_type": "ValidationError"} if not success else {}
            ))

            # Recovery if failed
            if not success:
                current_time += 2.0
                events.append(ProgressEvent(
                    event_type="tool_call",
                    timestamp=current_time,
                    node_name=node,
                    tool_name="recovery_tool",
                    tool_duration=1.5,
                    tool_success=True,
                    tool_result="Recovery successful"
                ))
        else:  # LLM call
            events.append(ProgressEvent(
                event_type="llm_call",
                timestamp=current_time,
                node_name=node,
                llm_model="gpt-4" if i % 2 == 0 else "gpt-3.5-turbo",
                llm_total_tokens=1200 + i * 200,
                llm_cost=0.024 + i * 0.005,
                duration=1.5 + i * 0.3,
                success=True
            ))

        # Node completion
        current_time += 0.8
        if node.endswith("Node"):  # Simple nodes auto-complete
            events.append(ProgressEvent(
                event_type="node_phase",
                timestamp=current_time,
                node_name=node,
                success=True,
                node_duration=current_time - (base_time + i * 2.5)
            ))

    # Final completion
    events.append(ProgressEvent(
        event_type="execution_complete",
        timestamp=current_time + 1.0,
        node_name="FlowAgent",
        node_duration=current_time + 1.0 - base_time,
        status=NodeStatus.COMPLETED,
        success=True,
        metadata={"total_cost": 0.156, "total_tokens": 12500}
    ))

    return events
create_demo_scenario(run_name='Demo Run', duration=10.0, cost=0.025, should_fail=False) async

Create a demo scenario with configurable parameters

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
async def create_demo_scenario(run_name: str = "Demo Run", duration: float = 10.0, cost: float = 0.025, should_fail: bool = False):
    """Create a demo scenario with configurable parameters.

    Builds a chronologically ordered list of ProgressEvent objects that
    simulate one agent run: execution start, strategy orchestration,
    planning (including a hard-coded sample TaskPlan), a tool call, and
    either a successful completion or a failure.

    Args:
        run_name: Label embedded in the start event's query metadata.
        duration: Simulated total run time in seconds; used as the offset
            of the final event's timestamp.
        cost: Total simulated LLM cost, split roughly 40/20/40 across the
            three LLM calls.
        should_fail: When True, the tool call fails and the scenario ends
            with an ``error`` event instead of ``execution_complete``.

    Returns:
        list: ProgressEvent instances ordered by timestamp.
    """
    base_time = time.time()
    events = []

    # Execution start
    events.append(ProgressEvent(
        event_type="execution_start",
        timestamp=base_time,
        node_name="FlowAgent",
        session_id=f"demo_session_{int(base_time)}",
        metadata={"query": f"Execute {run_name}", "user_id": "demo_user"}
    ))

    # Strategy orchestrator
    events.append(ProgressEvent(
        event_type="node_enter",
        timestamp=base_time + 0.1,
        node_name="StrategyOrchestratorNode"
    ))

    events.append(ProgressEvent(
        event_type="llm_call",
        timestamp=base_time + 1.2,
        node_name="StrategyOrchestratorNode",
        llm_model="gpt-4",
        llm_total_tokens=1200,
        llm_cost=cost * 0.4,
        duration=1.1,
        success=True,
        metadata={"strategy": "research_and_analyze"}
    ))

    # Planning
    events.append(ProgressEvent(
        event_type="node_enter",
        timestamp=base_time + 2.5,
        node_name="PlannerNode"
    ))

    events.append(ProgressEvent(
        event_type="llm_call",
        timestamp=base_time + 3.8,
        node_name="PlannerNode",
        llm_model="gpt-3.5-turbo",
        llm_total_tokens=800,
        llm_cost=cost * 0.2,
        duration=1.3,
        success=True
    ))
    # TaskPlan
    # NOTE: "full_plan" below is a captured sample TaskPlan (fixed UUID and
    # datetimes) used purely as demo payload for the plan_created event.
    events.append(ProgressEvent(
        event_type="plan_created",
        timestamp=base_time + 4.0,
        node_name="PlannerNode",
        status=NodeStatus.COMPLETED,
        success=True,
        metadata={"plan_name": "Demo Plan", "task_count": 3, "full_plan": TaskPlan(id='bf5053ad-1eae-4dd2-9c08-0c7fab49f80d', name='File Cleanup Task', description='Remove turtle_on_bike.py and execution_summary.json if they exist', tasks=[LLMTask(id='analyze_files', type='LLMTask', description='Analyze the current directory for turtle_on_bike.py and execution_summary.json', status='pending', priority=1, dependencies=[], subtasks=[], result=None, error=None, created_at=datetime(2025, 8, 13, 23, 51, 38, 726320), started_at=None, completed_at=None, metadata={}),ToolTask(id='remove_files', type='ToolTask', description='Delete turtle_on_bike.py and execution_summary.json using shell command', status='pending', priority=1, dependencies=[], subtasks=[], result=None, error=None, created_at=datetime(2025, 8, 13, 23, 51, 38, 726320), started_at=None, completed_at=None, metadata={}, retry_count=0, max_retries=3, critical=False, tool_name='shell', arguments={'command': "Remove-Item -Path 'turtle_on_bike.py', 'execution_summary.json' -ErrorAction SilentlyContinue"}, hypothesis='', validation_criteria='', expectation='')], status='created', created_at=datetime(2025, 8, 13, 23, 51, 38, 726320), metadata={}, execution_strategy='sequential')}
    ))

    # Execution with tools
    events.append(ProgressEvent(
        event_type="node_enter",
        timestamp=base_time + 5.0,
        node_name="ExecutorNode"
    ))

    # Tool call either succeeds or fails depending on should_fail.
    events.append(ProgressEvent(
        event_type="tool_call",
        timestamp=base_time + 6.2,
        node_name="ExecutorNode",
        tool_name="web_search",
        duration=2.1,
        success=not should_fail,
        tool_result="Search completed" if not should_fail else None,
        tool_error="Search failed" if should_fail else None,
        metadata={"error": "Search API timeout"} if should_fail else {}
    ))

    if not should_fail:
        # Analysis
        events.append(ProgressEvent(
            event_type="llm_call",
            timestamp=base_time + 8.5,
            node_name="AnalysisNode",
            llm_model="gpt-4",
            llm_total_tokens=1500,
            llm_cost=cost * 0.4,
            duration=2.3,
            success=True
        ))

        # Completion
        events.append(ProgressEvent(
            event_type="execution_complete",
            timestamp=base_time + duration,
            node_name="FlowAgent",
            node_duration=duration,
            status=NodeStatus.COMPLETED,
            success=True,
            metadata={"result": "Successfully completed"}
        ))
    else:
        # Failed completion (scenario aborts at ~70% of the nominal duration)
        events.append(ProgressEvent(
            event_type="error",
            timestamp=base_time + duration * 0.7,
            node_name="ExecutorNode",
            status=NodeStatus.FAILED,
            success=False,
            metadata={
                "error": "Execution failed due to tool error",
                "error_type": "ToolError"
            }
        ))

    return events
demo_accumulated_runs() async

Demo accumulated runs functionality

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
async def demo_accumulated_runs():
    """Demonstrate how several execution runs accumulate into combined stats."""
    print("\n📊 ACCUMULATED RUNS DEMONSTRATION")
    print("=" * 50)
    print("This demo shows how multiple execution runs are accumulated and analyzed")

    tree_printer = ProgressiveTreePrinter(mode=VerbosityMode.STANDARD)

    # (name, strategy, succeeded, duration, cost) for each simulated run.
    scenarios = [
        ("Market Analysis", "research_and_analyze", True, 12.5, 0.045),
        ("Content Creation", "creative_generation", True, 8.2, 0.032),
        ("Problem Solving", "problem_solving", False, 15.8, 0.067)  # This one fails
    ]

    for idx, (run_name, strategy, succeeded, run_duration, run_cost) in enumerate(scenarios, start=1):
        print(f"\n🏃 Running execution {idx}/3: {run_name}")

        # Announce the strategy, then replay the simulated event stream.
        tree_printer.print_strategy_selection(strategy)
        await asyncio.sleep(1)

        run_events = await create_demo_scenario(
            run_name=run_name,
            duration=run_duration,
            cost=run_cost,
            should_fail=not succeeded
        )

        for run_event in run_events:
            await tree_printer.progress_callback(run_event)
            await asyncio.sleep(0.2)  # keep the demo snappy

        # Flush so this run is archived into the accumulated history.
        tree_printer.flush(run_name)
        await asyncio.sleep(2)

    print("\n📈 ACCUMULATED SUMMARY:")
    tree_printer.print_accumulated_summary()

    # Optional export of the gathered statistics.
    answer = input("\n💾 Export accumulated data? (y/n): ")
    if answer.lower().startswith('y'):
        export_path = tree_printer.export_accumulated_data()
        print(f"✅ Data exported to: {export_path}")
demo_all_modes() async

Demo all verbosity modes with the same scenario

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
async def demo_all_modes():
    """Replay one identical scenario under every verbosity mode for comparison."""
    print("\n🎭 ALL MODES DEMONSTRATION")
    print("=" * 50)
    print("This demo will run the same scenario in all verbosity modes")
    print("to show the differences in output detail.")

    mode_catalog = [
        (VerbosityMode.MINIMAL, "MINIMAL - Only major updates"),
        (VerbosityMode.STANDARD, "STANDARD - Regular updates with panels"),
        (VerbosityMode.VERBOSE, "VERBOSE - Detailed information with metrics"),
        (VerbosityMode.DEBUG, "DEBUG - Full debugging info with all details"),
        (VerbosityMode.REALTIME, "REALTIME - Live updates (will show final tree)")
    ]

    separator = '=' * 60
    last_index = len(mode_catalog) - 1

    for idx, (mode, description) in enumerate(mode_catalog):
        print(f"\n{separator}")
        print(f"🎯 NOW DEMONSTRATING: {description}")
        print(f"{separator}")

        await asyncio.sleep(2)

        mode_printer = ProgressiveTreePrinter(mode=mode, realtime_minimal=False)

        await asyncio.sleep(1)

        # Same scenario every time so differences come from the mode alone.
        scenario_events = await create_demo_scenario()

        # Realtime mode gets a slightly slower replay so live updates show.
        delay = 0.5 if mode == VerbosityMode.REALTIME else 0.3
        for scenario_event in scenario_events:
            await mode_printer.progress_callback(scenario_event)
            await asyncio.sleep(delay)

        mode_printer.print_final_summary()

        # Pause between modes, except after the final one.
        if idx != last_index:
            input("\n⏸️  Press Enter to continue to next mode...")
demo_complete_features() async

Complete feature demonstration

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
async def demo_complete_features():
    """Exercise every printer feature once: strategies, errors, modes, analytics."""
    print("\n🚀 COMPLETE FEATURE DEMONSTRATION")
    print("=" * 50)
    print("This demo showcases all features in a comprehensive scenario")

    # Verbose printer carries the bulk of the demonstration.
    main_printer = ProgressiveTreePrinter(mode=VerbosityMode.VERBOSE)

    print("\n1️⃣ STRATEGY SELECTION SHOWCASE:")
    for strategy in ("direct_response", "research_and_analyze", "problem_solving"):
        showcase_context = {
            "reasoning": f"Demonstrating {strategy} strategy selection",
            "complexity_score": 0.6,
            "estimated_steps": 4
        }
        main_printer.print_strategy_selection(strategy, context=showcase_context)
        await asyncio.sleep(1)

    print("\n2️⃣ COMPLEX EXECUTION WITH ERRORS:")
    # Scenario featuring multiple nodes, failures and recovery.
    for complex_event in await create_complex_scenario():
        await main_printer.progress_callback(complex_event)
        await asyncio.sleep(0.4)

    main_printer.print_final_summary()

    print("\n3️⃣ MODE COMPARISON:")
    print("Switching to REALTIME mode for live demo...")
    await asyncio.sleep(2)

    live_printer = ProgressiveTreePrinter(
        mode=VerbosityMode.REALTIME,
        realtime_minimal=True
    )

    print("Running same scenario in REALTIME minimal mode:")
    for simple_event in await create_demo_scenario():
        await live_printer.progress_callback(simple_event)
        await asyncio.sleep(0.3)

    print("\n\n4️⃣ ACCUMULATED ANALYTICS:")
    # Flush both runs so their stats land in each printer's history.
    main_printer.flush("Complex Execution")
    live_printer.flush("Realtime Execution")

    # Merge the realtime history into the main printer for a single summary.
    main_printer._accumulated_runs.extend(live_printer._accumulated_runs)
    main_printer.print_accumulated_summary()
demo_enhanced_printer() async

Comprehensive demo of the enhanced progress printer showcasing all modes

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
async def demo_enhanced_printer():
    """Comprehensive demo of the enhanced progress printer showcasing all modes.

    Prints a menu, reads the user's choice (defaulting to "1" on empty or
    aborted input) and dispatches to the matching sub-demo coroutine.
    Unknown choices are silently ignored, matching the original behavior.
    """

    print("🚀 Starting Enhanced Progress Printer Demo...")
    print("Choose demo type:")
    print("1. All Modes Demo - Show all verbosity modes with same scenario")
    print("2. Interactive Mode Selection - Choose specific mode")
    print("3. Strategy Selection Demo - Show strategy printing")
    print("4. Accumulated Runs Demo - Show multi-run accumulation")
    print("5. Complete Feature Demo - All features in sequence")
    print("6. Exit")

    try:
        choice = input("Enter choice (1-6) [default: 1]: ").strip() or "1"
    except (EOFError, KeyboardInterrupt):
        # Fix: was a bare `except:`, which silently swallowed every exception
        # (including SystemExit). Only input-abort conditions fall back now.
        choice = "1"

    if choice == "6":
        return

    # Dispatch table keeps menu entries and handlers in one place.
    demos = {
        "1": demo_all_modes,
        "2": demo_interactive_mode,
        "3": demo_strategy_selection,
        "4": demo_accumulated_runs,
        "5": demo_complete_features,
    }
    handler = demos.get(choice)
    if handler is not None:
        await handler()
demo_interactive_mode() async

Interactive mode selection demo

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
async def demo_interactive_mode():
    """Interactive mode selection demo.

    Prompts for a verbosity mode (1-5), then replays the standard demo
    scenario through a ProgressiveTreePrinter configured with that mode.
    Falls back to STANDARD on invalid or aborted input.
    """
    print("\n🎮 INTERACTIVE MODE SELECTION")
    print("Choose your preferred verbosity mode:")
    print("1. MINIMAL - Only major updates")
    print("2. STANDARD - Regular updates")
    print("3. VERBOSE - Detailed information")
    print("4. DEBUG - Full debugging info")
    print("5. REALTIME - Live updates")

    modes = {
        "1": VerbosityMode.MINIMAL,
        "2": VerbosityMode.STANDARD,
        "3": VerbosityMode.VERBOSE,
        "4": VerbosityMode.DEBUG,
        "5": VerbosityMode.REALTIME
    }
    try:
        choice = input("Enter choice (1-5) [default: 2]: ").strip() or "2"
    except (EOFError, KeyboardInterrupt):
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # masked real bugs. Only input-abort conditions fall back now.
        choice = "2"
    # Unknown entries fall back to STANDARD, as before.
    mode = modes.get(choice, VerbosityMode.STANDARD)

    printer = ProgressiveTreePrinter(mode=mode)
    print(f"\n🎯 Running demo in {mode.value.upper()} mode...")

    # Strategy selection announcement before the event replay.
    printer.print_strategy_selection("slow_complex_planning", context={
        "reasoning": "Task has multiple 'and' conditions requiring complex breakdown",
        "complexity_score": 0.9,
        "estimated_steps": 8
    })

    await asyncio.sleep(1)

    events = await create_demo_scenario()
    for event in events:
        await printer.progress_callback(event)
        # Realtime mode replays a bit faster so live updates are visible.
        await asyncio.sleep(0.5 if mode == VerbosityMode.REALTIME else 0.8)

    printer.print_final_summary()
demo_strategy_selection() async

Demo all strategy selection options

Source code in toolboxv2/mods/isaa/extras/terminal_progress.py
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
async def demo_strategy_selection():
    """Walk every strategy option through MINIMAL, STANDARD and VERBOSE printers."""
    print("\n🎯 STRATEGY SELECTION DEMONSTRATION")
    print("=" * 50)

    strategy_catalog = [
        ("direct_response", "Simple question that needs direct answer"),
        ("fast_simple_planning", "Task needs quick multi-step approach"),
        ("slow_complex_planning", "Complex task with multiple 'and' conditions"),
        ("research_and_analyze", "Needs information gathering and analysis"),
        ("creative_generation", "Content creation with personalization"),
        ("problem_solving", "Analysis with validation required")
    ]

    for verbosity in (VerbosityMode.MINIMAL, VerbosityMode.STANDARD, VerbosityMode.VERBOSE):
        print(f"\n🔍 Strategy demo in {verbosity.value.upper()} mode:")
        print("-" * 40)

        demo_printer = ProgressiveTreePrinter(mode=verbosity)

        for strategy_name, why in strategy_catalog:
            # Heuristic scores derived from keywords in the strategy name.
            if "simple" in strategy_name:
                complexity = 0.3
            elif "complex" in strategy_name:
                complexity = 0.7
            else:
                complexity = 0.5

            if "direct" in strategy_name:
                step_estimate = 1
            elif "fast" in strategy_name:
                step_estimate = 3
            else:
                step_estimate = 6

            demo_printer.print_strategy_selection(
                strategy_name,
                context={
                    "reasoning": why,
                    "complexity_score": complexity,
                    "estimated_steps": step_estimate
                }
            )
            await asyncio.sleep(0.8)

        # Pause between modes, except after the last (VERBOSE) pass.
        if verbosity != VerbosityMode.VERBOSE:
            input("\n⏸️  Press Enter for next mode...")
verbose_output
DynamicVerboseFormatter

Unified, dynamic formatter that adapts to screen size

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
class DynamicVerboseFormatter:
    """Unified, dynamic formatter that adapts to screen size"""

    def __init__(self, print_func=None, min_width: int = 40, max_width: int = 240):
        self.style = Style()
        self.print = print_func or print
        self.min_width = min_width
        self.max_width = max_width
        self._terminal_width = self._get_terminal_width()


    def get_git_info(self):
        """Checks for a git repo and returns its name and branch, or None."""
        try:
            # Check if we are in a git repository
            subprocess.check_output(['git', 'rev-parse', '--is-inside-work-tree'], stderr=subprocess.DEVNULL)

            # Get the repo name (root folder name)
            repo_root = subprocess.check_output(['git', 'rev-parse', '--show-toplevel'],
                                                stderr=subprocess.DEVNULL).strip().decode('utf-8')
            repo_name = os.path.basename(repo_root)

            # Get the current branch name
            branch = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
                                             stderr=subprocess.DEVNULL).strip().decode('utf-8')

            return repo_name, branch
        except (subprocess.CalledProcessError, FileNotFoundError):
            # This handles cases where 'git' is not installed or it's not a git repo
            return None

    def _get_terminal_width(self) -> int:
        """Get current terminal width with fallback"""
        try:
            width = shutil.get_terminal_size().columns
            return max(self.min_width, min(width - 2, self.max_width))
        except (OSError, AttributeError):
            return 80

    def _wrap_text(self, text: str, width: int = None) -> list[str]:
        """Wrap text to fit terminal width"""
        if width is None:
            width = self._terminal_width - 4  # Account for borders

        words = text.split()
        lines = []
        current_line = []
        current_length = 0

        for word in words:
            if current_length + len(word) + len(current_line) <= width:
                current_line.append(word)
                current_length += len(word)
            else:
                if current_line:
                    lines.append(' '.join(current_line))
                current_line = [word]
                current_length = len(word)

        if current_line:
            lines.append(' '.join(current_line))

        return lines

    def _create_border(self, char: str = "─", width: int = None) -> str:
        """Create a border line that fits the terminal"""
        if width is None:
            width = self._terminal_width
        return char * width

    def _center_text(self, text: str, width: int = None) -> str:
        """Center text within the given width"""
        if width is None:
            width = self._terminal_width

        # Remove ANSI codes for length calculation
        clean_text = self._strip_ansi(text)
        padding = max(0, (width - len(clean_text)) // 2)
        return " " * padding + text

    def _strip_ansi(self, text: str) -> str:
        """Remove ANSI escape codes for length calculation"""
        import re
        ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
        return ansi_escape.sub('', text)

    def print_header(self, text: str):
        """Print a dynamic header that adapts to screen size"""
        self._terminal_width = self._get_terminal_width()

        if self._terminal_width < 60:  # Tiny screen
            self.print()
            self.print(self.style.CYAN("=" * self._terminal_width))
            self.print(self.style.CYAN(self.style.Bold(text)))
            self.print(self.style.CYAN("=" * self._terminal_width))
        else:  # Regular/large screen
            border_width = min(len(text) + 2, self._terminal_width - 2)
            border = "─" * border_width

            self.print()
            self.print(self.style.CYAN(f"┌{border}┐"))
            self.print(self.style.CYAN(f"│ {self.style.Bold(text).center(border_width - 2)} │"))
            self.print(self.style.CYAN(f"└{border}┘"))
        self.print()

    def print_section(self, title: str, content: str):
        """Print a clean section with adaptive formatting"""
        self._terminal_width = self._get_terminal_width()

        # Title
        if self._terminal_width < 60:
            self.print(f"\n{self.style.BLUE('●')} {self.style.Bold(title)}")
        else:
            self.print(f"\n{self.style.BLUE('●')} {self.style.Bold(self.style.BLUE(title))}")

        # Content with proper wrapping
        for line in content.split('\n'):
            if line.strip():
                wrapped_lines = self._wrap_text(line.strip())
                for wrapped_line in wrapped_lines:
                    if self._terminal_width < 60:
                        self.print(f"  {wrapped_line}")
                    else:
                        self.print(f"  {self.style.GREY('│')} {wrapped_line}")
        self.print()

    def print_progress_bar(self, current: int, maximum: int, title: str = "Progress"):
        """Dynamic progress bar that adapts to screen size"""
        self._terminal_width = self._get_terminal_width()

        # Calculate bar width based on screen size
        if self._terminal_width < 60:
            bar_width = 10
            template = f"\r{title}: [{{}}] {current}/{maximum}"
        else:
            bar_width = min(30, self._terminal_width - 30)
            template = f"\r{self.style.CYAN(title)}: [{{}}] {current}/{maximum} ({current / maximum * 100:.1f}%)"

        progress = int((current / maximum) * bar_width)
        bar = "█" * progress + "░" * (bar_width - progress)

        self.print(template.format(bar), end='', flush=True)

    def print_state(self, state: str, details: dict[str, Any] = None) -> str:
        """Print current state with adaptive formatting"""
        self._terminal_width = self._get_terminal_width()

        state_colors = {
            'ACTION': self.style.GREEN2,
            'PROCESSING': self.style.YELLOW2,
            'BRAKE': self.style.RED2,
            'DONE': self.style.BLUE2,
            'ERROR': self.style.RED,
            'SUCCESS': self.style.GREEN,
            'INFO': self.style.CYAN
        }

        color_func = state_colors.get(state.upper(), self.style.WHITE2)

        if self._terminal_width < 60:
            # Compact format for small screens
            self.print(f"\n[{color_func(state)}]")
            result = f"\n[{state}]"
        else:
            # Full format for larger screens
            self.print(f"\n{self.style.Bold('State:')} {color_func(state)}")
            result = f"\nState: {state}"

        if details:
            for key, value in details.items():
                # Truncate long values on small screens
                if self._terminal_width < 60 and len(str(value)) > 30:
                    display_value = str(value)[:27] + "..."
                else:
                    display_value = str(value)

                if self._terminal_width < 60:
                    self.print(f"  {key}: {display_value}")
                    result += f"\n  {key}: {display_value}"
                else:
                    self.print(f"  {self.style.GREY('├─')} {self.style.CYAN(key)}: {display_value}")
                    result += f"\n  ├─ {key}: {display_value}"

        return result

    def print_code_block(self, code: str, language: str = "python"):
        """Print code with syntax awareness and proper formatting"""
        self._terminal_width = self._get_terminal_width()

        if self._terminal_width < 60:
            # Simple format for small screens
            self.print(f"\n{self.style.GREY('Code:')}")
            for line in code.split('\n'):
                self.print(f"  {line}")
        else:
            # Detailed format for larger screens
            self.print(f"\n{self.style.BLUE('┌─')} {self.style.YELLOW2(f'{language.upper()} Code')}")

            lines = code.split('\n')
            for i, line in enumerate(lines):
                if i == len(lines) - 1 and not line.strip():
                    continue

                # Wrap long lines
                if len(line) > self._terminal_width - 6:
                    wrapped = self._wrap_text(line, self._terminal_width - 6)
                    for j, wrapped_line in enumerate(wrapped):
                        prefix = "│" if j == 0 else "│"
                        self.print(f"{self.style.BLUE(prefix)} {wrapped_line}")
                else:
                    self.print(f"{self.style.BLUE('│')} {line}")

            self.print(f"{self.style.BLUE('└─')} {self.style.GREY('End of code block')}")

    def print_table(self, headers: list[str], rows: list[list[str]]):
        """Print a dynamic table that adapts to screen size"""
        self._terminal_width = self._get_terminal_width()

        if not rows:
            return

        # Calculate column widths
        all_data = [headers] + rows
        col_widths = []

        for col in range(len(headers)):
            max_width = max(len(str(row[col])) for row in all_data if col < len(row))
            col_widths.append(min(max_width, self._terminal_width // len(headers) - 2))

        # Adjust if total width exceeds terminal
        total_width = sum(col_widths) + len(headers) * 3 + 1
        if total_width > self._terminal_width:
            # Proportionally reduce column widths
            scale_factor = (self._terminal_width - len(headers) * 3 - 1) / sum(col_widths)
            col_widths = [max(8, int(w * scale_factor)) for w in col_widths]

        # Print table
        self._print_table_row(headers, col_widths, is_header=True)
        self._print_table_separator(col_widths)

        for row in rows:
            self._print_table_row(row, col_widths)

    def _print_table_row(self, row: list[str], widths: list[int], is_header: bool = False):
        """Helper method to print a table row"""
        formatted_cells = []
        for _i, (cell, width) in enumerate(zip(row, widths, strict=False)):
            cell_str = str(cell)
            if len(cell_str) > width:
                cell_str = cell_str[:width - 3] + "..."

            if is_header:
                formatted_cells.append(self.style.Bold(self.style.CYAN(cell_str.ljust(width))))
            else:
                formatted_cells.append(cell_str.ljust(width))

        self.print(f"│ {' │ '.join(formatted_cells)} │")

    def _print_table_separator(self, widths: list[int]):
        """Helper method to print table separator"""
        parts = ['─' * w for w in widths]
        self.print(f"├─{'─┼─'.join(parts)}─┤")

    async def process_with_spinner(self, message: str, coroutine):
        """Execute coroutine with adaptive spinner"""
        self._terminal_width = self._get_terminal_width()

        if self._terminal_width < 60:
            # Simple spinner for small screens
            spinner_symbols = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"
        else:
            # Detailed spinner for larger screens
            spinner_symbols = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"

        # Truncate message if too long
        if len(message) > self._terminal_width - 10:
            display_message = message[:self._terminal_width - 13] + "..."
        else:
            display_message = message

        with Spinner(f"{self.style.CYAN('●')} {display_message}", symbols=spinner_symbols):
            return await coroutine

    def print_git_info(self) -> str | None:
        """Get current git branch with error handling"""
        try:
            result = subprocess.run(
                ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
                capture_output=True, text=True, timeout=2
            )
            if result.returncode == 0 and result.stdout.strip():
                branch = result.stdout.strip()

                # Check for uncommitted changes
                status_result = subprocess.run(
                    ['git', 'status', '--porcelain'],
                    capture_output=True, text=True, timeout=1
                )
                dirty = "*" if status_result.stdout.strip() else ""

                git_info = f"{branch}{dirty}"
                self.print_info(f"Git: {git_info}")
                return git_info
        except (subprocess.TimeoutExpired, FileNotFoundError, subprocess.SubprocessError):
            pass
        return None

    # Convenience methods with consistent styling
    def print_error(self, message: str):
        """Print error message with consistent formatting"""
        self.print(f"{self.style.RED('✗')} {self.style.RED(message)}")

    def print_success(self, message: str):
        """Print success message with consistent formatting"""
        self.print(f"{self.style.GREEN('✓')} {self.style.GREEN(message)}")

    def print_warning(self, message: str):
        """Print warning message with consistent formatting"""
        self.print(f"{self.style.YELLOW('⚠')} {self.style.YELLOW(message)}")

    def print_info(self, message: str):
        """Print info message with consistent formatting"""
        self.print(f"{self.style.CYAN('ℹ')} {self.style.CYAN(message)}")

    def print_debug(self, message: str):
        """Print debug message with consistent formatting"""
        self.print(f"{self.style.GREY('🐛')} {self.style.GREY(message)}")
get_git_info()

Checks for a git repo and returns its name and branch, or None.

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
def get_git_info(self):
    """Return ``(repo_name, branch)`` for the surrounding git repo, or None."""
    git_query = subprocess.check_output
    try:
        # Probe first: raises CalledProcessError outside a git work tree.
        git_query(['git', 'rev-parse', '--is-inside-work-tree'], stderr=subprocess.DEVNULL)

        # Work-tree root; the repo is conventionally named after this folder.
        work_tree_root = git_query(['git', 'rev-parse', '--show-toplevel'],
                                   stderr=subprocess.DEVNULL).strip().decode('utf-8')

        # Currently checked-out branch.
        active_branch = git_query(['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
                                  stderr=subprocess.DEVNULL).strip().decode('utf-8')

        return os.path.basename(work_tree_root), active_branch
    except (subprocess.CalledProcessError, FileNotFoundError):
        # git is absent, or we are not inside a repository.
        return None
print_code_block(code, language='python')

Print code with syntax awareness and proper formatting

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
def print_code_block(self, code: str, language: str = "python"):
    """Print code with syntax awareness and proper formatting.

    Args:
        code: Source text to display; a trailing empty line is dropped in
            the wide layout so the footer sits flush with the code.
        language: Label shown in the header of the wide layout.
    """
    self._terminal_width = self._get_terminal_width()

    if self._terminal_width < 60:
        # Simple format for small screens: plain two-space indent, no borders.
        self.print(f"\n{self.style.GREY('Code:')}")
        for line in code.split('\n'):
            self.print(f"  {line}")
    else:
        # Detailed format for larger screens: bordered block with a language header.
        self.print(f"\n{self.style.BLUE('┌─')} {self.style.YELLOW2(f'{language.upper()} Code')}")

        lines = code.split('\n')
        for i, line in enumerate(lines):
            if i == len(lines) - 1 and not line.strip():
                continue

            # Wrap long lines so they fit inside the border.
            if len(line) > self._terminal_width - 6:
                # BUGFIX: the original computed `prefix = "│" if j == 0 else "│"`,
                # a dead conditional whose branches were identical; the literal
                # border character is used directly instead.
                for wrapped_line in self._wrap_text(line, self._terminal_width - 6):
                    self.print(f"{self.style.BLUE('│')} {wrapped_line}")
            else:
                self.print(f"{self.style.BLUE('│')} {line}")

        self.print(f"{self.style.BLUE('└─')} {self.style.GREY('End of code block')}")
print_debug(message)

Print debug message with consistent formatting

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
336
337
338
def print_debug(self, message: str):
    """Emit a debug message: grey bug icon followed by the grey message text."""
    grey = self.style.GREY
    self.print(grey('🐛') + " " + grey(message))
print_error(message)

Print error message with consistent formatting

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
320
321
322
def print_error(self, message: str):
    """Emit an error message: red '✗' marker followed by the red message text."""
    red = self.style.RED
    self.print(red('✗') + " " + red(message))
print_git_info()

Get current git branch with error handling

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
def print_git_info(self) -> str | None:
    """Get current git branch with error handling"""
    try:
        result = subprocess.run(
            ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
            capture_output=True, text=True, timeout=2
        )
        if result.returncode == 0 and result.stdout.strip():
            branch = result.stdout.strip()

            # Check for uncommitted changes
            status_result = subprocess.run(
                ['git', 'status', '--porcelain'],
                capture_output=True, text=True, timeout=1
            )
            dirty = "*" if status_result.stdout.strip() else ""

            git_info = f"{branch}{dirty}"
            self.print_info(f"Git: {git_info}")
            return git_info
    except (subprocess.TimeoutExpired, FileNotFoundError, subprocess.SubprocessError):
        pass
    return None
print_header(text)

Print a dynamic header that adapts to screen size

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
def print_header(self, text: str):
    """Print a dynamic header that adapts to the current terminal width."""
    width = self._get_terminal_width()
    self._terminal_width = width
    cyan = self.style.CYAN

    if width < 60:  # Tiny screen: full-width '=' rule above and below the title.
        bar = "=" * width
        self.print()
        self.print(cyan(bar))
        self.print(cyan(self.style.Bold(text)))
        self.print(cyan(bar))
    else:  # Regular/large screen: draw a box sized to the title.
        inner = min(len(text) + 2, width - 2)
        rule = "─" * inner
        self.print()
        self.print(cyan(f"┌{rule}┐"))
        self.print(cyan(f"│ {self.style.Bold(text).center(inner - 2)} │"))
        self.print(cyan(f"└{rule}┘"))
    self.print()
print_info(message)

Print info message with consistent formatting

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
332
333
334
def print_info(self, message: str):
    """Emit an info message: cyan 'ℹ' marker followed by the cyan message text."""
    cyan = self.style.CYAN
    self.print(cyan('ℹ') + " " + cyan(message))
print_progress_bar(current, maximum, title='Progress')

Dynamic progress bar that adapts to screen size

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
def print_progress_bar(self, current: int, maximum: int, title: str = "Progress"):
    """Dynamic progress bar that adapts to screen size.

    Args:
        current: Units of work completed so far.
        maximum: Total units of work. Values < 1 are clamped to 1 for the
            ratio computation so a 0 total no longer crashes; the displayed
            "current/maximum" text still shows the caller's value.
        title: Label shown before the bar.
    """
    self._terminal_width = self._get_terminal_width()

    # BUGFIX: guard against maximum == 0, which previously raised
    # ZeroDivisionError in both the percentage and the bar computation.
    safe_maximum = max(maximum, 1)

    # Calculate bar width based on screen size
    if self._terminal_width < 60:
        bar_width = 10
        template = f"\r{title}: [{{}}] {current}/{maximum}"
    else:
        bar_width = min(30, self._terminal_width - 30)
        template = f"\r{self.style.CYAN(title)}: [{{}}] {current}/{maximum} ({current / safe_maximum * 100:.1f}%)"

    progress = int((current / safe_maximum) * bar_width)
    bar = "█" * progress + "░" * (bar_width - progress)

    self.print(template.format(bar), end='', flush=True)
print_section(title, content)

Print a clean section with adaptive formatting

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
def print_section(self, title: str, content: str):
    """Print a clean section with adaptive formatting."""
    width = self._get_terminal_width()
    self._terminal_width = width
    compact = width < 60

    # Title line: bullet plus bold title (additionally colored on wide screens).
    if compact:
        self.print(f"\n{self.style.BLUE('●')} {self.style.Bold(title)}")
    else:
        self.print(f"\n{self.style.BLUE('●')} {self.style.Bold(self.style.BLUE(title))}")

    # Body: wrap each non-blank line; wide screens get a grey gutter marker.
    for raw_line in content.split('\n'):
        stripped = raw_line.strip()
        if not stripped:
            continue
        for piece in self._wrap_text(stripped):
            if compact:
                self.print(f"  {piece}")
            else:
                self.print(f"  {self.style.GREY('│')} {piece}")
    self.print()
print_state(state, details=None)

Print current state with adaptive formatting

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
def print_state(self, state: str, details: dict[str, Any] = None) -> str:
    """Print the current state with adaptive formatting.

    Returns the plain-text (uncolored) rendering of what was printed.
    """
    width = self._get_terminal_width()
    self._terminal_width = width
    compact = width < 60

    # Color palette per known state; anything else falls back to WHITE2.
    palette = {
        'ACTION': self.style.GREEN2,
        'PROCESSING': self.style.YELLOW2,
        'BRAKE': self.style.RED2,
        'DONE': self.style.BLUE2,
        'ERROR': self.style.RED,
        'SUCCESS': self.style.GREEN,
        'INFO': self.style.CYAN
    }
    colorize = palette.get(state.upper(), self.style.WHITE2)

    if compact:
        # Compact format for small screens
        self.print(f"\n[{colorize(state)}]")
        result = f"\n[{state}]"
    else:
        # Full format for larger screens
        self.print(f"\n{self.style.Bold('State:')} {colorize(state)}")
        result = f"\nState: {state}"

    for key, value in (details or {}).items():
        text = str(value)
        # Truncate long values on small screens.
        if compact and len(text) > 30:
            text = text[:27] + "..."
        if compact:
            self.print(f"  {key}: {text}")
            result += f"\n  {key}: {text}"
        else:
            self.print(f"  {self.style.GREY('├─')} {self.style.CYAN(key)}: {text}")
            result += f"\n  ├─ {key}: {text}"

    return result
print_success(message)

Print success message with consistent formatting

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
324
325
326
def print_success(self, message: str):
    """Emit a success message: green '✓' marker followed by the green message text."""
    green = self.style.GREEN
    self.print(green('✓') + " " + green(message))
print_table(headers, rows)

Print a dynamic table that adapts to screen size

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
def print_table(self, headers: list[str], rows: list[list[str]]):
    """Print a dynamic table that adapts to screen size."""
    width = self._get_terminal_width()
    self._terminal_width = width

    if not rows:
        return

    # Column width = longest cell in that column, capped by an even share
    # of the terminal width (minus padding).
    everything = [headers] + rows
    cap = width // len(headers) - 2
    col_widths = [
        min(max(len(str(row[col])) for row in everything if col < len(row)), cap)
        for col in range(len(headers))
    ]

    # Shrink all columns proportionally when the padded total would overflow.
    padding = len(headers) * 3 + 1
    if sum(col_widths) + padding > width:
        scale = (width - padding) / sum(col_widths)
        col_widths = [max(8, int(w * scale)) for w in col_widths]

    # Header, separator, then one line per data row.
    self._print_table_row(headers, col_widths, is_header=True)
    self._print_table_separator(col_widths)
    for row in rows:
        self._print_table_row(row, col_widths)
print_warning(message)

Print warning message with consistent formatting

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
328
329
330
def print_warning(self, message: str):
    """Emit a warning message: yellow '⚠' marker followed by the yellow message text."""
    yellow = self.style.YELLOW
    self.print(yellow('⚠') + " " + yellow(message))
process_with_spinner(message, coroutine) async

Execute coroutine with adaptive spinner

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
async def process_with_spinner(self, message: str, coroutine):
    """Execute `coroutine` while showing a spinner, returning its result.

    The message is truncated with an ellipsis when it would overflow the
    current terminal width.
    """
    self._terminal_width = self._get_terminal_width()

    # BUGFIX: the original if/else selected between two identical spinner
    # strings (dead conditional); a single constant expresses the behavior.
    spinner_symbols = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"

    # Truncate the message if it is too long for the terminal.
    if len(message) > self._terminal_width - 10:
        display_message = message[:self._terminal_width - 13] + "..."
    else:
        display_message = message

    with Spinner(f"{self.style.CYAN('●')} {display_message}", symbols=spinner_symbols):
        return await coroutine
EnhancedVerboseOutput

Main interface for verbose output with full functionality

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
class EnhancedVerboseOutput:
    """Main interface for verbose output with full functionality.

    Wraps a DynamicVerboseFormatter (attribute access falls through to it via
    ``__getattr__``) and adds role-aware chat logging, structured
    process-result summaries, tool-call rendering, and timing-aware headers.
    Logging methods are no-ops when ``verbose`` is False unless noted.
    """

    def __init__(self, verbose: bool = True, print_func=None, **formatter_kwargs):
        self.verbose = verbose
        self.print = print_func or print
        self.formatter = DynamicVerboseFormatter(self.print, **formatter_kwargs)
        self._start_time = time.time()  # reference point for log_header's elapsed-time suffix

    def __getattr__(self, name):
        """Delegate unknown attributes to the formatter for convenience."""
        return getattr(self.formatter, name)

    async def print_agent_response(self, response: str):
        await self.log_message("assistant", response)

    async def print_thought(self, thought: str):
        await self.log_message("assistant", f"Thought: {thought}")

    async def log_message(self, role: str, content: str):
        """Log chat messages with role-based formatting.

        Fenced code blocks are delegated to the code-block printer; content
        that is a complete JSON object/array is pretty-printed when it parses.
        """
        if not self.verbose:
            return

        role_formats = {
            'user': (self.formatter.style.GREEN, "👤"),
            'assistant': (self.formatter.style.BLUE, "🤖"),
            'system': (self.formatter.style.YELLOW, "⚙️"),
            'error': (self.formatter.style.RED, "❌"),
            'debug': (self.formatter.style.GREY, "🐛")
        }

        color_func, icon = role_formats.get(role.lower(), (self.formatter.style.WHITE, "•"))

        if content.startswith("```"):
            self.formatter.print_code_block(content)
            return

        # BUGFIX: the original condition mixed `or`/`and` without parentheses,
        # so any content merely ending in "]" reached json.loads and could
        # raise an uncaught JSONDecodeError. Require matching delimiters and
        # fall back to the raw text when parsing fails.
        if (content.startswith("{") and content.endswith("}")) or \
                (content.startswith("[") and content.endswith("]")):
            try:
                content = json.dumps(json.loads(content), indent=2)
            except json.JSONDecodeError:
                pass

        # Adapt formatting based on screen size
        if self.formatter._terminal_width < 60:
            self.print(f"\n{icon} [{role.upper()}]")
            # Wrap content for small screens
            wrapped_content = self.formatter._wrap_text(content, self.formatter._terminal_width - 2)
            for line in wrapped_content:
                self.print(f"  {line}")
        else:
            self.print(f"\n{icon} {color_func(f'[{role.upper()}]')}")
            self.print(f"{self.formatter.style.GREY('└─')} {content}")
        self.print()

    async def log_process_result(self, result: dict[str, Any]):
        """Log processing results with structured formatting."""
        if not self.verbose:
            return

        content_parts = []

        if 'action' in result:
            content_parts.append(f"Action: {result['action']}")
        if 'is_completed' in result:
            content_parts.append(f"Completed: {result['is_completed']}")
        if 'effectiveness' in result:
            content_parts.append(f"Effectiveness: {result['effectiveness']}")
        if 'recommendations' in result:
            content_parts.append(f"Recommendations:\n{result['recommendations']}")
        if 'workflow' in result:
            content_parts.append(f"Workflow:\n{result['workflow']}")
        if 'errors' in result and result['errors']:
            content_parts.append(f"Errors: {result['errors']}")
        if 'content' in result:
            content_parts.append(f"Content:\n{result['content']}")

        self.formatter.print_section("Process Result", '\n'.join(content_parts))

    def log_header(self, text: str):
        """Log a header suffixed with elapsed time since construction."""
        if not self.verbose:
            return

        elapsed = time.time() - self._start_time
        timing = f" ({elapsed / 60:.1f}m)" if elapsed > 60 else f" ({elapsed:.1f}s)"

        self.formatter.print_header(f"{text}{timing}")

    def log_state(self, state: str, user_ns: dict = None, override: bool = False):
        """Log state; `override=True` forces output even when not verbose."""
        if not self.verbose and not override:
            return

        return self.formatter.print_state(state, user_ns)

    async def process(self, message: str, coroutine):
        """Await `coroutine`, with a spinner unless suppressed ("code"/"silent")."""
        if not self.verbose:
            return await coroutine

        if message.lower() in ["code", "silent"]:
            return await coroutine

        return await self.formatter.process_with_spinner(message, coroutine)

    def print_tool_call(self, tool_name: str, tool_args: dict, result: str | None = None):
        """Print information about a tool call.

        Tries to render `result` as pretty-printed JSON when possible; a long
        'output' string inside a JSON dict is replaced by a placeholder plus a
        three-line preview. User-facing labels are German by design.
        """
        if not self.verbose:
            return

        # Format the arguments as pretty-printed JSON (or "None").
        args_str = json.dumps(tool_args, indent=2, ensure_ascii=False) if tool_args else "None"
        content = f"Tool: {tool_name}\nArguments:\n{args_str}"

        if result:
            result_output = ""
            try:
                # 1. Try to parse the string as JSON.
                data = json.loads(result)

                # 2. Check whether the result is a dictionary (the common case).
                if isinstance(data, dict):
                    # Work on a copy so the 'output' value can be swapped for display.
                    display_data = data.copy()
                    output_preview = ""

                    # Special handling for a long 'output' string, if present.
                    if 'output' in display_data and isinstance(display_data['output'], str):
                        full_output = display_data['output']
                        # Replace the long string in the JSON with a placeholder.
                        display_data['output'] = "<-- [Inhalt wird separat formatiert]"

                        # Build a preview from the first three lines (dim grey text).
                        lines = full_output.strip().split('\n')[:3]
                        preview_text = '\n'.join(lines)
                        output_preview = f"\n\n--- Vorschau für 'output' ---\n\x1b[90m{preview_text}\n...\x1b[0m"
                    # Append the formatted JSON (with placeholder) to the content.
                    formatted_json = json.dumps(display_data, indent=2, ensure_ascii=False)
                    result_output = f"Geparstes Dictionary:\n{formatted_json}{output_preview}"

                else:
                    # Valid JSON but not a dictionary (e.g. a list).
                    result_output = f"Gepastes JSON (kein Dictionary):\n{json.dumps(data, indent=2, ensure_ascii=False)}"

            except json.JSONDecodeError:
                # 3. If parsing fails, treat the string as raw text.
                result_output = f"{result}"

            content += f"\nResult:\n{result_output}"

        else:
            # The task is still running.
            content += "\nResult: In progress..."

        # Hand the assembled content to the formatter.
        self.formatter.print_section("Tool Call", content)

    def print_event(self, event: dict):
        """Print event information (thoughts, tool calls/responses, token usage)."""
        if not self.verbose:
            return

        if event.get("content") and event["content"].get("parts"):
            for part in event["content"]["parts"]:
                if part.get("text"):
                    self.formatter.print_info(f"Thought: {part['text']}")
                if part.get("function_call"):
                    self.print_tool_call(
                        part["function_call"]["name"],
                        part["function_call"]["args"]
                    )
                if part.get("function_response"):
                    result = part["function_response"]["response"].get("result", "")
                    self.print_tool_call(
                        part["function_response"]["name"],
                        {},
                        str(result)
                    )

        if event.get("usage_metadata"):
            self.formatter.print_info(f"Token usage: {event['usage_metadata']}")

    @contextmanager
    def section_context(self, title: str):
        """Context manager: section header on entry, success line on exit (even on error)."""
        if self.verbose:
            self.formatter.print_section(title, "Starting...")
        try:
            yield
        finally:
            if self.verbose:
                self.formatter.print_success(f"Completed: {title}")

    def clear_line(self):
        """Overwrite the current terminal line with spaces and return the cursor."""
        self.print('\r' + ' ' * self.formatter._terminal_width + '\r', end='')

    def print_separator(self, char: str = "─"):
        """Print a separator line spanning the terminal width."""
        self.print(self.formatter.style.GREY(char * self.formatter._terminal_width))

    def print_warning(self, message: str):
        """Print a warning message with yellow style"""
        if self.verbose:
            self.print(self.formatter.style.YELLOW(f"⚠️  WARNING: {message}"))

    def print_error(self, message: str):
        """Print an error message with red style"""
        if self.verbose:
            self.print(self.formatter.style.RED(f"❌ ERROR: {message}"))

    def print_success(self, message: str):
        """Print a success message with green style"""
        if self.verbose:
            self.print(self.formatter.style.GREEN(f"✅ SUCCESS: {message}"))
__getattr__(name)

Delegate to formatter for convenience

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
350
351
352
def __getattr__(self, name):
    """Delegate to formatter for convenience"""
    return getattr(self.formatter, name)
clear_line()

Clear current line

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
537
538
539
def clear_line(self):
    """Overwrite the current terminal line with spaces and return the cursor."""
    width = self.formatter._terminal_width
    self.print('\r' + ' ' * width + '\r', end='')
log_header(text)

Log header with timing information

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
418
419
420
421
422
423
424
425
426
def log_header(self, text: str):
    """Log a header suffixed with elapsed time since construction (minutes past 60s)."""
    if not self.verbose:
        return

    elapsed = time.time() - self._start_time
    if elapsed > 60:
        timing = f" ({elapsed / 60:.1f}m)"
    else:
        timing = f" ({elapsed:.1f}s)"
    self.formatter.print_header(f"{text}{timing}")
log_message(role, content) async

Log chat messages with role-based formatting

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
async def log_message(self, role: str, content: str):
    """Log chat messages with role-based formatting.

    Fenced code blocks are delegated to the code-block printer; content that
    is a complete JSON object/array is pretty-printed when it parses.
    """
    if not self.verbose:
        return

    role_formats = {
        'user': (self.formatter.style.GREEN, "👤"),
        'assistant': (self.formatter.style.BLUE, "🤖"),
        'system': (self.formatter.style.YELLOW, "⚙️"),
        'error': (self.formatter.style.RED, "❌"),
        'debug': (self.formatter.style.GREY, "🐛")
    }

    color_func, icon = role_formats.get(role.lower(), (self.formatter.style.WHITE, "•"))

    if content.startswith("```"):
        self.formatter.print_code_block(content)
        return

    # BUGFIX: the original condition mixed `or`/`and` without parentheses,
    # so any content merely ending in "]" reached json.loads and could raise
    # an uncaught JSONDecodeError. Require matching delimiters and fall back
    # to the raw text when parsing fails.
    if (content.startswith("{") and content.endswith("}")) or \
            (content.startswith("[") and content.endswith("]")):
        try:
            content = json.dumps(json.loads(content), indent=2)
        except json.JSONDecodeError:
            pass

    # Adapt formatting based on screen size
    if self.formatter._terminal_width < 60:
        self.print(f"\n{icon} [{role.upper()}]")
        # Wrap content for small screens
        wrapped_content = self.formatter._wrap_text(content, self.formatter._terminal_width - 2)
        for line in wrapped_content:
            self.print(f"  {line}")
    else:
        self.print(f"\n{icon} {color_func(f'[{role.upper()}]')}")
        self.print(f"{self.formatter.style.GREY('└─')} {content}")
    self.print()
log_process_result(result) async

Log processing results with structured formatting

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
async def log_process_result(self, result: dict[str, Any]):
    """Log processing results with structured formatting."""
    if not self.verbose:
        return

    # Render the known fields in a fixed order, skipping absent keys.
    labels = [
        ('action', "Action: {}"),
        ('is_completed', "Completed: {}"),
        ('effectiveness', "Effectiveness: {}"),
        ('recommendations', "Recommendations:\n{}"),
        ('workflow', "Workflow:\n{}"),
        ('errors', "Errors: {}"),
        ('content', "Content:\n{}"),
    ]
    content_parts = []
    for key, template in labels:
        if key not in result:
            continue
        # 'errors' is only shown when truthy, matching the original logic.
        if key == 'errors' and not result[key]:
            continue
        content_parts.append(template.format(result[key]))

    self.formatter.print_section("Process Result", '\n'.join(content_parts))
log_state(state, user_ns=None, override=False)

Log state with optional override

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
428
429
430
431
432
433
def log_state(self, state: str, user_ns: dict = None, override: bool = False):
    """Log state via the formatter; `override=True` forces output even when not verbose."""
    if self.verbose or override:
        return self.formatter.print_state(state, user_ns)
    return None
print_error(message)

Print an error message with red style

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
550
551
552
553
def print_error(self, message: str):
    """Print an error message with red style (verbose mode only)."""
    if not self.verbose:
        return
    self.print(self.formatter.style.RED(f"❌ ERROR: {message}"))
print_event(event)

Print event information

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
def print_event(self, event: dict):
    """Print event information: thoughts, tool calls/responses, and token usage."""
    if not self.verbose:
        return

    parts = (event.get("content") or {}).get("parts") or []
    for part in parts:
        text = part.get("text")
        if text:
            self.formatter.print_info(f"Thought: {text}")
        call = part.get("function_call")
        if call:
            self.print_tool_call(call["name"], call["args"])
        response = part.get("function_response")
        if response:
            result = response["response"].get("result", "")
            self.print_tool_call(response["name"], {}, str(result))

    usage = event.get("usage_metadata")
    if usage:
        self.formatter.print_info(f"Token usage: {usage}")
print_separator(char='─')

Print a separator line

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
541
542
543
def print_separator(self, char: str = "─"):
    """Print a horizontal rule spanning the formatter's current terminal width."""
    rule = char * self.formatter._terminal_width
    self.print(self.formatter.style.GREY(rule))
print_success(message)

Print a success message with green style

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
555
556
557
558
def print_success(self, message: str):
    """Print a success message with green style (verbose mode only)."""
    if not self.verbose:
        return
    self.print(self.formatter.style.GREEN(f"✅ SUCCESS: {message}"))
print_tool_call(tool_name, tool_args, result=None)

Prints information about the tool call. Attempts to format the result as JSON when possible.

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
def print_tool_call(self, tool_name: str, tool_args: dict, result: str | None = None):
    """
    Gibt Informationen zum Tool-Aufruf aus.
    Versucht, das Ergebnis als JSON zu formatieren, wenn möglich.
    """
    if not self.verbose:
        return

    # Argumente wie zuvor formatieren
    args_str = json.dumps(tool_args, indent=2, ensure_ascii=False) if tool_args else "None"
    content = f"Tool: {tool_name}\nArguments:\n{args_str}"

    if result:
        result_output = ""
        try:
            # 1. Versuch, den String als JSON zu parsen
            data = json.loads(result)

            # 2. Prüfen, ob das Ergebnis ein Dictionary ist (der häufigste Fall)
            if isinstance(data, dict):
                # Eine Kopie für die Anzeige erstellen, um den 'output'-Wert zu ersetzen
                display_data = data.copy()
                output_preview = ""

                # Spezielle Handhabung für einen langen 'output'-String, falls vorhanden
                if 'output' in display_data and isinstance(display_data['output'], str):
                    full_output = display_data['output']
                    # Den langen String im JSON durch einen Platzhalter ersetzen
                    display_data['output'] = "<-- [Inhalt wird separat formatiert]"

                    # Vorschau mit den ersten 3 Zeilen erstellen
                    lines = full_output.strip().split('\n')[:3]
                    preview_text = '\n'.join(lines)
                    output_preview = f"\n\n--- Vorschau für 'output' ---\n\x1b[90m{preview_text}\n...\x1b[0m"  # Hellgrauer Text
                    # display_data['output'] = output_preview
                # Das formatierte JSON (mit Platzhalter) zum Inhalt hinzufügen
                formatted_json = json.dumps(display_data, indent=2, ensure_ascii=False)
                result_output = f"Geparstes Dictionary:\n{formatted_json}{output_preview}"

            else:
                # Falls es valides JSON, aber kein Dictionary ist (z.B. eine Liste)
                result_output = f"Gepastes JSON (kein Dictionary):\n{json.dumps(data, indent=2, ensure_ascii=False)}"

        except json.JSONDecodeError:
            # 3. Wenn Parsen fehlschlägt, den String als Rohtext behandeln
            result_output = f"{result}"

        content += f"\nResult:\n{result_output}"

    else:
        # Fall, wenn der Task noch läuft
        content += "\nResult: In progress..."

    # Den gesamten Inhalt an den Formatter übergeben
    self.formatter.print_section("Tool Call", content)
print_warning(message)

Print a warning message with yellow style

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
545
546
547
548
def print_warning(self, message: str):
    """Print a warning message with yellow style (verbose mode only)."""
    if not self.verbose:
        return
    self.print(self.formatter.style.YELLOW(f"⚠️  WARNING: {message}"))
process(message, coroutine) async

Process with optional spinner

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
435
436
437
438
439
440
441
442
443
async def process(self, message: str, coroutine):
    """Await `coroutine`, showing a spinner unless suppressed.

    The spinner is skipped when not verbose, or when the message is the
    special marker "code" or "silent".
    """
    silent = (not self.verbose) or message.lower() in ("code", "silent")
    if silent:
        return await coroutine
    return await self.formatter.process_with_spinner(message, coroutine)
section_context(title)

Context manager for sections

Source code in toolboxv2/mods/isaa/extras/verbose_output.py
526
527
528
529
530
531
532
533
534
535
@contextmanager
def section_context(self, title: str):
    """Context manager for sections.

    On entry prints a "Starting..." section for *title*; on exit (even when
    the body raises) prints a completion message. Both are suppressed when
    not verbose. NOTE: the flag is re-read on exit, so toggling
    `self.verbose` inside the body changes which messages appear.
    """
    if self.verbose:
        self.formatter.print_section(title, "Starting...")
    try:
        yield
    finally:
        if self.verbose:
            self.formatter.print_success(f"Completed: {title}")
clean_markdown_robust(content)

Robust markdown cleaning

Source code in toolboxv2/mods/isaa/extras/web_search.py
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
def clean_markdown_robust(content: str) -> str:
    """Robust markdown cleaning

    Strips common encoding artifacts, drops mostly-garbled lines and
    navigation boilerplate, and collapses runs of blank lines.
    Returns '' for falsy input.
    """
    if not content:
        return ""

    # Remove common encoding artifacts more aggressively
    # NOTE(review): several keys below render identically in this view and
    # the table appears to contain two duplicate 'â€"' keys (the later one
    # wins in a dict literal). In the original file these are most likely
    # distinct UTF-8-read-as-Latin-1 mojibake byte sequences (e.g. en dash
    # vs em dash) — confirm against the raw bytes before editing this table.
    replacements = {
        '�': '',
        '’': "'", '“': '"', 'â€': '"', '…': '...',
        'â€"': '-', 'â€"': '--', 'Â': ' ',
        'á': 'á', 'é': 'é', 'í': 'í', 'ó': 'ó', 'ú': 'ú',
        '•': '•', '·': '·', '«': '«', '»': '»'
    }

    for old, new in replacements.items():
        content = content.replace(old, new)

    # Remove lines with too many non-ASCII characters
    lines = content.split('\n')
    cleaned_lines = []

    for line in lines:
        line = line.strip()
        if not line:
            # Keep one marker per blank line; excess blanks collapse below.
            cleaned_lines.append('')
            continue

        # Skip lines that are mostly garbled (under 70% ASCII on lines
        # longer than 10 chars)
        ascii_chars = sum(1 for c in line if ord(c) < 128)
        if len(line) > 10 and ascii_chars / len(line) < 0.7:
            continue

        # Skip navigation/junk lines: too short, known nav boilerplate,
        # or containing no word characters at all
        if (len(line) < 3 or
            line.lower() in ['home', 'menu', 'search', 'login', 'register'] or
            re.match(r'^[\W\s]*$', line)):
            continue

        cleaned_lines.append(line)

    # Remove excessive empty lines (3+ newlines -> a single blank line)
    result = '\n'.join(cleaned_lines)
    result = re.sub(r'\n{3,}', '\n\n', result)

    return result.strip()
convert_to_markdown(element)

Convert HTML element to markdown with fallbacks

Source code in toolboxv2/mods/isaa/extras/web_search.py
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
def convert_to_markdown(element):
    """Convert HTML element to markdown with fallbacks.

    Tries, in order:
      1. html2text conversion of the serialized element,
      2. manual extraction of headings/paragraphs with basic markdown,
      3. plain text extraction.

    NOTE: strategies 2 and 3 mutate *element* in place (heading and
    paragraph nodes are removed from the tree).
    """

    # Strategy 1: Use html2text
    try:
        import html2text
        h = html2text.HTML2Text()
        h.ignore_links = False
        h.ignore_images = True
        h.body_width = 0  # no hard line wrapping
        h.unicode_snob = True
        h.skip_internal_links = True
        h.inline_links = False
        h.decode_errors = 'ignore'

        markdown = h.handle(str(element))
        # Only accept substantial output; otherwise fall through.
        if markdown and len(markdown.strip()) > 100:
            return markdown
    except ImportError:
        print("html2text not installed")
    except Exception:
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # are not swallowed.
        pass

    # Strategy 2: Extract text with basic formatting
    try:
        text_parts = []

        for elem in element.find_all(['h1', 'h2', 'h3', 'h4', 'h5', 'h6']):
            level = int(elem.name[1])
            text_parts.append('#' * level + ' ' + elem.get_text(strip=True))
            # BUG FIX: the original replaced the node with the literal string
            # '[HEADING_PLACEHOLDER]', which was never consumed and leaked
            # verbatim into `remaining_text` below. Removing the node keeps
            # the output clean.
            elem.extract()

        for elem in element.find_all('p'):
            text = elem.get_text(strip=True)
            if text:
                text_parts.append(text)
            # Same fix as above for '[PARAGRAPH_PLACEHOLDER]'.
            elem.extract()

        # Get remaining text (whatever wasn't a heading or paragraph)
        remaining_text = element.get_text(separator='\n', strip=True)

        # Combine all text
        all_text = '\n\n'.join(text_parts)
        if remaining_text:
            all_text += '\n\n' + remaining_text

        return all_text

    except Exception:
        pass

    # Strategy 3: Simple text extraction
    return element.get_text(separator='\n', strip=True)
find_main_content(soup)

Find main content using multiple strategies

Source code in toolboxv2/mods/isaa/extras/web_search.py
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
def find_main_content(soup):
    """Find main content using multiple strategies"""

    def text_len(node):
        """Length of the stripped text carried by a node."""
        return len(node.get_text(strip=True))

    # Strategy 1: semantic HTML5 containers
    for tag_name in ('main', 'article'):
        node = soup.find(tag_name)
        if node and text_len(node) > 300:
            return node

    # Strategy 2: well-known content container selectors
    selectors = (
        '[role="main"]', '.main-content', '#main-content', '.content', '#content',
        '.post-content', '.entry-content', '.article-content', '.blog-content',
        '.story-body', '.article-body', '.post-body',
    )
    for selector in selectors:
        node = soup.select_one(selector)
        if node and text_len(node) > 300:
            return node

    # Strategy 3: the <div> carrying the most text, if substantial enough
    all_divs = soup.find_all('div')
    if all_divs:
        biggest = max(all_divs, key=text_len)
        if text_len(biggest) > 300:
            return biggest

    # Strategy 4: whole <body> as the last resort
    return soup.find('body')
is_content_parseable(content)

Check if content is properly parsed and readable

Source code in toolboxv2/mods/isaa/extras/web_search.py
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
def is_content_parseable(content: str) -> bool:
    """
    Check if content is properly parsed and readable
    """
    # Too short (or blank) to judge — reject outright.
    if not content or len(content.strip()) < 50:
        return False

    total_chars = len(content)
    if total_chars == 0:
        return False

    # Unicode replacement characters signal decoding failures.
    replacement_chars = content.count('�')

    # Runs of characters typical of mojibake / broken encodings.
    garbled_patterns = (
        r'[ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ]{5,}',
        r'[’“â€�]{3,}',
        r'[\x80-\xff]{4,}',  # High-byte sequences
        r'[^\x00-\x7F\s]{10,}'  # Too many non-ASCII chars in sequence
    )

    problematic_chars = 0
    for rx in garbled_patterns:
        problematic_chars += sum(len(hit) for hit in re.findall(rx, content))

    replacement_ratio = replacement_chars / total_chars
    problematic_ratio = problematic_chars / total_chars

    # How much of the text is made of plausible English words?
    english_words = re.findall(r'\b[a-zA-Z]{3,}\b', content)
    english_ratio = len(' '.join(english_words)) / total_chars if english_words else 0

    is_parseable = (
        replacement_ratio < 0.05 and  # Less than 5% replacement chars
        problematic_ratio < 0.15 and  # Less than 15% garbled chars
        english_ratio > 0.3 and  # At least 30% English words
        len(english_words) > 10  # At least 10 English words
    )

    if not is_parseable:
        print("Content failed parseability check:")
        print(f"  Replacement ratio: {replacement_ratio:.1%}")
        print(f"  Problematic ratio: {problematic_ratio:.1%}")
        print(f"  English ratio: {english_ratio:.1%}")
        print(f"  English words: {len(english_words)}")

    return is_parseable
is_mostly_readable(text)

Check if text is mostly readable ASCII/common unicode

Source code in toolboxv2/mods/isaa/extras/web_search.py
320
321
322
323
324
325
326
def is_mostly_readable(text: str) -> bool:
    """Check if text is mostly readable ASCII/common unicode"""
    if not text:
        return False

    # Count characters that render sensibly (printable or whitespace);
    # over 80% of them means the text is considered readable.
    ok_count = 0
    for ch in text:
        if ch.isprintable() or ch.isspace():
            ok_count += 1
    return ok_count / len(text) > 0.8

Test the robust search functionality

Source code in toolboxv2/mods/isaa/extras/web_search.py
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
def robust_search():
    """Test the robust search functionality"""
    query = "Python web scraping best practices"
    hits = web_search(query, max_results=3)

    banner = "=" * 60
    print(f"\n{banner}")
    print(f"FINAL RESULTS FOR: '{query}'")
    print(f"{banner}")

    for idx, hit in enumerate(hits, 1):
        body = hit['content']
        print(f"\n{idx}. {hit['title']}")
        print(f"URL: {hit['url']}")
        print(f"Content length: {len(body)} characters")
        print(f"First 300 chars: {body[:300]}...")

        # Show parseability stats
        ascii_count = sum(1 for ch in body if ord(ch) < 128)
        print(f"ASCII ratio: {ascii_count / len(body):.1%}")
        print("-" * 80)
url_to_markdown_robust(url)

Robust URL to markdown converter with multiple encoding strategies

Source code in toolboxv2/mods/isaa/extras/web_search.py
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
def url_to_markdown_robust(url: str) -> str | None:
    """
    Robust URL to markdown converter with multiple encoding strategies.

    Fetches the page, decodes it via a cascade of strategies (declared
    response encoding, chardet detection, HTML meta charset, trial of
    common encodings, lossy UTF-8 fallback), strips boilerplate elements,
    extracts the main content and converts it to cleaned markdown.

    Returns:
        The cleaned markdown string, or None when the response is not
        HTML/text, no usable main content is found, or the result fails
        the final parseability check. All errors are caught and reported
        via print rather than raised.
    """
    try:
        # Browser-like headers: many sites reject obvious bot requests.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.9',
            'Accept-Charset': 'utf-8, iso-8859-1;q=0.5',
            'Connection': 'keep-alive'
        }

        response = requests.get(url, headers=headers, timeout=20, allow_redirects=True)
        response.raise_for_status()

        # Quick content type check — only process HTML/plain text.
        content_type = response.headers.get('content-type', '').lower()
        if not any(ct in content_type for ct in ['text/html', 'text/plain', 'application/xhtml']):
            print(f"Skipping non-HTML content: {content_type}")
            return None

        # Get raw content for the manual decoding strategies below.
        raw_content = response.content

        decoded_content = None
        used_encoding = None

        # Strategy 1: trust the declared response encoding — unless it is
        # iso-8859-1/ascii, which requests assumes by default when the
        # server declares nothing.
        response_encoding = response.encoding
        if response_encoding and response_encoding.lower() not in ['iso-8859-1', 'ascii']:
            try:
                decoded_content = response.text
                used_encoding = response_encoding
                # Quick test for encoding quality
                if '�' in decoded_content or not is_mostly_readable(decoded_content[:1000]):
                    decoded_content = None
            except Exception:
                pass

        # Strategy 2: Detect encoding from content
        if not decoded_content:
            try:
                import chardet
                detected = chardet.detect(raw_content)
                # Guard: chardet may report encoding=None even at high
                # confidence; decoding with None would raise.
                if detected and detected.get('confidence', 0) > 0.8 and detected.get('encoding'):
                    decoded_content = raw_content.decode(detected['encoding'])
                    used_encoding = detected['encoding']
                    if '�' in decoded_content or not is_mostly_readable(decoded_content[:1000]):
                        decoded_content = None
            # BUG FIX: the original had `except ImportError and ModuleNotFoundError:`,
            # which evaluates the `and` to ModuleNotFoundError alone. Plain
            # ImportError covers both (ModuleNotFoundError subclasses it).
            except ImportError:
                print("chardet not installed")
            except Exception:
                pass

        # Strategy 3: Extract encoding from HTML meta tags
        if not decoded_content:
            try:
                # Try UTF-8 first just to read the meta tags in the head.
                temp_content = raw_content.decode('utf-8', errors='ignore')[:2048]
                charset_patterns = [
                    r'<meta[^>]+charset["\'\s]*=["\'\s]*([^"\'>\s]+)',
                    r'<meta[^>]+content[^>]+charset=([^"\'>\s;]+)',
                    r'<\?xml[^>]+encoding["\'\s]*=["\'\s]*([^"\'>\s]+)'
                ]

                for pattern in charset_patterns:
                    match = re.search(pattern, temp_content, re.I)
                    if match:
                        encoding = match.group(1).strip().lower()
                        try:
                            decoded_content = raw_content.decode(encoding)
                            used_encoding = encoding
                            if not ('�' in decoded_content or not is_mostly_readable(decoded_content[:1000])):
                                break
                        except Exception:
                            pass
                        # Declared charset produced garbage — keep looking.
                        decoded_content = None
            except Exception:
                pass

        # Strategy 4: Try common encodings
        if not decoded_content:
            common_encodings = ['utf-8', 'utf-8-sig', 'latin1', 'cp1252', 'iso-8859-1']
            for encoding in common_encodings:
                try:
                    test_content = raw_content.decode(encoding)
                    if is_mostly_readable(test_content[:1000]) and '�' not in test_content[:1000]:
                        decoded_content = test_content
                        used_encoding = encoding
                        break
                except Exception:
                    continue

        # Strategy 5: Last resort — lossy decode so we always have text.
        if not decoded_content:
            decoded_content = raw_content.decode('utf-8', errors='replace')
            used_encoding = 'utf-8 (with errors)'

        print(f"Used encoding: {used_encoding}")

        # Parse with BeautifulSoup
        soup = BeautifulSoup(decoded_content, 'html.parser')

        # Remove all unwanted elements aggressively
        unwanted_tags = ['script', 'style', 'nav', 'header', 'footer', 'aside', 'iframe',
                         'form', 'button', 'input', 'noscript', 'meta', 'link', 'svg']
        for tag in unwanted_tags:
            for element in soup.find_all(tag):
                element.decompose()

        # Remove elements whose class/id names suggest chrome or ads.
        unwanted_patterns = [
            r'.*ad[s]?[-_].*', r'.*banner.*', r'.*popup.*', r'.*modal.*',
            r'.*cookie.*', r'.*newsletter.*', r'.*social.*', r'.*share.*',
            r'.*comment.*', r'.*sidebar.*', r'.*menu.*', r'.*navigation.*'
        ]

        for pattern in unwanted_patterns:
            for attr in ['class', 'id']:
                for element in soup.find_all(attrs={attr: re.compile(pattern, re.I)}):
                    element.decompose()

        # Find main content with multiple strategies
        main_content = find_main_content(soup)

        if not main_content:
            print("No main content found")
            return None

        # Convert to markdown using multiple strategies
        markdown_content = convert_to_markdown(main_content)

        if not markdown_content:
            print("Markdown conversion failed")
            return None

        # Clean and validate
        cleaned_content = clean_markdown_robust(markdown_content)

        # Final validation
        if not is_content_parseable(cleaned_content):
            print("Content failed parseability check")
            return None

        return cleaned_content

    except Exception as e:
        print(f"Error processing {url}: {e}")
        return None

Main search function with robust fallbacks

Source code in toolboxv2/mods/isaa/extras/web_search.py
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
def web_search(query: str, max_results: int = 5) -> list[dict[str, str]]:
    """
    Main search function with robust fallbacks.

    Tries configured search APIs first (SerpAPI, then Bing) when their
    keys are present in the environment, and falls back to the robust
    DuckDuckGo-based search otherwise.

    Args:
        query: Search query string.
        max_results: Number of results wanted. Tolerates a (possibly
            quoted) numeric string, e.g. from an LLM tool invocation.

    Returns:
        List of dicts with 'url', 'title' and 'content' (markdown) keys.
    """
    # Tolerate max_results arriving as a string such as '"5"'.
    if isinstance(max_results, str):
        if (max_results.startswith('"') and max_results.endswith('"')) or \
           (max_results.startswith("'") and max_results.endswith("'")):
            max_results = max_results[1:-1]
        max_results = int(max_results.strip())

    api_keys = {
        'serpapi': os.getenv('SERPAPI_API_KEY'),
        'bing': os.getenv('BING_API_KEY')
    }
    # NOTE: the original also guarded this loop with `if api_keys:`, which is
    # always true for a non-empty dict literal — the per-key check below is
    # the real gate, so the dead guard was dropped.
    for api_name, api_key in api_keys.items():
        if not api_key:
            continue
        try:
            print(f"Trying {api_name.upper()} API...")
            if api_name == 'serpapi':
                results = web_search_serpapi(query, max_results, api_key)
            elif api_name == 'bing':
                results = web_search_bing(query, max_results, api_key)
            else:
                continue

            # Only accept the API result when it fully satisfies the request;
            # partial results fall through to the robust search below.
            if results and len(results) >= max_results:
                return results
        except Exception as e:
            print(f"{api_name.upper()} API failed: {e}")

    # Use robust DuckDuckGo search
    return web_search_robust(query, max_results)
web_search_bing(query, max_results=5, api_key=None)

Web search using Bing Search API (free tier: 3,000 queries/month) Get your free API key at: https://azure.microsoft.com/en-us/services/cognitive-services/bing-web-search-api/

Source code in toolboxv2/mods/isaa/extras/web_search.py
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
def web_search_bing(query: str, max_results: int = 5, api_key: str = None) -> list[dict[str, str]]:
    """
    Web search using Bing Search API (free tier: 3,000 queries/month)
    Get your free API key at: https://azure.microsoft.com/en-us/services/cognitive-services/bing-web-search-api/
    """
    if not api_key:
        print("Please get a free API key from Azure Cognitive Services")
        return []

    try:
        response = requests.get(
            "https://api.bing.microsoft.com/v7.0/search",
            headers={"Ocp-Apim-Subscription-Key": api_key},
            params={
                "q": query,
                "count": max_results,
                "textDecorations": False,
                "textFormat": "HTML",
            },
        )
        response.raise_for_status()
        payload = response.json()

        gathered = []
        if "webPages" in payload and "value" in payload["webPages"]:
            for entry in payload["webPages"]["value"][:max_results]:
                link = entry.get("url", "")
                name = entry.get("name", "")

                print(f"Processing: {name}")
                page_markdown = url_to_markdown_robust(link)

                # Keep only pages that converted to usable markdown.
                if page_markdown:
                    gathered.append({
                        'url': link,
                        'title': name,
                        'content': page_markdown
                    })

        return gathered

    except Exception as e:
        print(f"Bing search error: {e}")
        return []
web_search_robust(query, max_results=5, max_attempts=15)

Robust search that keeps trying until it gets enough good results

Source code in toolboxv2/mods/isaa/extras/web_search.py
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
def web_search_robust(query: str, max_results: int = 5, max_attempts: int = 15) -> list[dict[str, str]]:
    """
    Robust search that keeps trying until it gets enough good results.

    Gathers more candidate URLs than needed (DuckDuckGo lite, falling back
    to a fixed list of known-good sites), fetches them all in parallel and
    keeps only those that convert to parseable markdown.

    Args:
        query: Search query string.
        max_results: Number of good results wanted. Tolerates (possibly
            quoted) numeric strings, e.g. from LLM tool invocations.
        max_attempts: Number of candidate URLs to gather and try.

    Returns:
        At most ``max_results`` dicts with 'url', 'title', 'content' keys.
    """
    # Tolerate string arguments such as '"5"' (common from LLM tool calls).
    if isinstance(max_results, str):
        if max_results.startswith('"') and max_results.endswith('"') or max_results.startswith("'") and max_results.endswith("'"):
            max_results = max_results[1:-1]
        max_results = int(max_results.strip())
    if isinstance(max_attempts, str):
        if max_attempts.startswith('"') and max_attempts.endswith('"') or max_attempts.startswith("'") and max_attempts.endswith("'"):
            max_attempts = max_attempts[1:-1]
        max_attempts = int(max_attempts.strip())

    def get_more_search_urls(search_query: str, num_urls: int = 15) -> list[dict[str, str]]:
        """Get more URLs than needed so we can filter out bad ones"""
        try:
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
                'Accept': 'text/html,application/xhtml+xml',
                'Accept-Language': 'en-US,en;q=0.9',
            }

            # DuckDuckGo lite: lightweight HTML results, no JS needed.
            search_url = "https://lite.duckduckgo.com/lite/"
            data = {'q': search_query}

            response = requests.post(search_url, data=data, headers=headers, timeout=15)
            response.raise_for_status()

            soup = BeautifulSoup(response.content, 'html.parser')
            results = []

            for link in soup.find_all('a', href=True):
                href = link.get('href', '')
                text = link.get_text(strip=True)

                # Keep external result links only; skip ads/shopping/images.
                if (href.startswith('http') and
                    'duckduckgo.com' not in href and
                    len(text) > 5 and
                    not any(skip in href.lower() for skip in ['ads', 'shopping', 'images'])):

                    results.append({
                        'url': href,
                        'title': text[:150]
                    })

                    if len(results) >= num_urls:
                        break

            return results

        except Exception as e:
            print(f"Search error: {e}")
            return []

    def get_fallback_urls(search_query: str) -> list[dict[str, str]]:
        """Get fallback URLs from known good sites"""
        encoded_query = quote_plus(search_query)
        fallback_urls = [
            f"https://stackoverflow.com/search?q={encoded_query}",
            f"https://www.reddit.com/search/?q={encoded_query}",
            f"https://medium.com/search?q={encoded_query}",
            f"https://dev.to/search?q={encoded_query}",
            f"https://github.com/search?q={encoded_query}&type=repositories",
            f"https://docs.python.org/3/search.html?q={encoded_query}",
            f"https://realpython.com/?s={encoded_query}",
            f"https://towardsdatascience.com/search?q={encoded_query}",
            f"https://www.geeksforgeeks.org/?s={encoded_query}",
            f"https://hackernoon.com/search?query={encoded_query}"
        ]

        return [
            {'url': url, 'title': f"Search results for '{search_query}'"}
            for url in fallback_urls
        ]

    print(f"Searching for: '{query}' (need {max_results} good results)")

    # Get candidate URLs
    candidate_urls = get_more_search_urls(query, max_attempts)

    if not candidate_urls:
        print("Primary search failed, using fallback URLs...")
        candidate_urls = get_fallback_urls(query)

    print(f"Found {len(candidate_urls)} candidate URLs")

    def fetch(candidate):
        """Convert one candidate into a result dict, or None if unusable."""
        markdown_content = url_to_markdown_robust(candidate['url'])
        if markdown_content:
            return {
                'url': candidate['url'],
                'title': candidate['title'],
                'content': markdown_content
            }
        return None

    # Fetch all candidates in parallel — the work is I/O-bound, so threads
    # overlap the network waits.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        outcomes = list(executor.map(fetch, candidate_urls))
    processed_count = len(candidate_urls)

    # BUG FIX: the original returned every good result; the cap at
    # max_results only existed in dead (commented-out) sequential code.
    # Cap here so the function honors its contract.
    good_results = [item for item in outcomes if item][:max_results]

    print(f"\n🎉 Final results: {len(good_results)} good results out of {processed_count} attempted")
    return good_results
web_search_serpapi(query, max_results=5, api_key=None)

Web search using SerpAPI (free tier: 100 searches/month) Get your free API key at: https://serpapi.com/

Source code in toolboxv2/mods/isaa/extras/web_search.py
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
def web_search_serpapi(query: str, max_results: int = 5, api_key: str = None) -> list[dict[str, str]]:
    """
    Web search using SerpAPI (free tier: 100 searches/month)
    Get your free API key at: https://serpapi.com/
    """
    if not api_key:
        print("Please get a free API key from https://serpapi.com/")
        return []

    try:
        response = requests.get(
            "https://serpapi.com/search",
            params={
                "engine": "google",
                "q": query,
                "api_key": api_key,
                "num": max_results,
            },
        )
        response.raise_for_status()
        payload = response.json()

        gathered = []
        if "organic_results" in payload:
            for entry in payload["organic_results"][:max_results]:
                link = entry.get("link", "")
                name = entry.get("title", "")

                print(f"Processing: {name}")
                page_markdown = url_to_markdown_robust(link)

                # Keep only pages that converted to usable markdown.
                if page_markdown:
                    gathered.append({
                        'url': link,
                        'title': name,
                        'content': page_markdown
                    })

        return gathered

    except Exception as e:
        print(f"SerpAPI search error: {e}")
        return []

module

EnhancedAgentRequestHandler

Bases: BaseHTTPRequestHandler

Enhanced HTTP request handler for standalone server with comprehensive UI support.

Source code in toolboxv2/mods/isaa/module.py
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
class EnhancedAgentRequestHandler(BaseHTTPRequestHandler):
    """Enhanced HTTP request handler for standalone server with comprehensive UI support.

    Serves a single agent over a fixed set of routes:
      GET     /, /ui                                   -> enhanced UI HTML
      GET     /status, /api/status, /api/agent_ui/status -> JSON status snapshot
      POST    /api/run, /api/agent_ui/run_agent        -> execute the agent on a query
      POST    /api/reset, /api/agent_ui/reset_context  -> reset the agent's context
      OPTIONS *                                        -> CORS preflight
    """

    def __init__(self, isaa_mod, agent_id: str, agent, *args, **kwargs):
        # Bind handler context first: BaseHTTPRequestHandler.__init__
        # processes the request immediately, so these attributes must
        # already exist when super().__init__ runs.
        self.isaa_mod = isaa_mod
        self.agent_id = agent_id
        self.agent = agent
        super().__init__(*args, **kwargs)

    def do_GET(self):
        """Handle GET requests for enhanced UI and status."""
        parsed_path = urlparse(self.path)

        if parsed_path.path in ['/', '/ui']:
            self._serve_enhanced_ui()
        elif parsed_path.path in ['/api/status', '/api/agent_ui/status', '/status']:
            self._serve_status()
        else:
            self._send_404()

    def do_POST(self):
        """Handle POST requests for enhanced API endpoints."""
        parsed_path = urlparse(self.path)

        if parsed_path.path in ['/api/run', '/api/agent_ui/run_agent']:
            self._handle_run_request()
        elif parsed_path.path in ['/api/reset', '/api/agent_ui/reset_context']:
            self._handle_reset_request()
        else:
            self._send_404()

    def _serve_enhanced_ui(self):
        """Serve the enhanced UI HTML page (built by get_agent_ui_html)."""
        try:
            html_content = get_agent_ui_html()

            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.send_header('Content-Length', str(len(html_content.encode('utf-8'))))
            self.end_headers()
            self.wfile.write(html_content.encode('utf-8'))

        except Exception as e:
            self._send_error_response(500, f"Error serving UI: {str(e)}")

    def _serve_status(self):
        """Serve a JSON status snapshot for this agent.

        Enriches the snapshot with the agent's own ``status()`` payload when
        available; a failing ``status()`` call is ignored so this endpoint
        always answers.
        """
        try:
            status_info = {
                'agent_id': self.agent_id,
                'agent_name': getattr(self.agent, 'name', 'Unknown'),
                'agent_type': self.agent.__class__.__name__,
                'status': 'active',
                'server_type': 'standalone',
                'timestamp': time.time()
            }

            if hasattr(self.agent, 'status'):
                try:
                    agent_status = self.agent.status()
                    if isinstance(agent_status, dict):
                        status_info['agent_status'] = agent_status
                except Exception:
                    # Best-effort enrichment only. (Fix: was a bare `except:`,
                    # which also swallowed KeyboardInterrupt/SystemExit.)
                    pass

            response_data = json.dumps(status_info).encode('utf-8')

            self.send_response(200)
            self.send_header('Content-type', 'application/json')
            self.send_header('Content-Length', str(len(response_data)))
            self.end_headers()
            self.wfile.write(response_data)

        except Exception as e:
            self._send_error_response(500, f"Error getting status: {str(e)}")

    def _handle_run_request(self):
        """Handle enhanced run requests with comprehensive progress tracking.

        Expects a JSON body with ``query`` (required), ``session_id`` and
        ``include_progress`` (optional). Runs the agent on a private event
        loop and replies with a JSON result, optionally including collected
        progress events.
        """
        try:
            # Fix: self.headers['Content-Length'] returns None when the header
            # is absent, making int() raise TypeError; .get with a default
            # degrades gracefully to an empty body instead.
            content_length = int(self.headers.get('Content-Length', 0))
            request_body = self.rfile.read(content_length)
            request_data = json.loads(request_body.decode('utf-8'))

            query = request_data.get('query', '')
            session_id = request_data.get('session_id', f'standalone_{secrets.token_hex(8)}')
            include_progress = request_data.get('include_progress', False)

            if not query:
                self._send_error_response(400, "Missing 'query' field")
                return

            # Run agent with enhanced progress tracking on a private loop
            # (handler threads have no running event loop of their own).
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

            try:
                progress_tracker = EnhancedProgressTracker()
                progress_events = []
                enhanced_progress = {}

                async def standalone_progress_callback(event: ProgressEvent):
                    if include_progress:
                        progress_data = progress_tracker.extract_progress_data(event)
                        progress_events.append({
                            'timestamp': event.timestamp,
                            'event_type': event.event_type,
                            'status': getattr(event, 'status', 'unknown').value if hasattr(event, 'status') and event.status else 'unknown',
                            'data': event.to_dict()
                        })
                        enhanced_progress.update(progress_data)

                # Temporarily install our progress callback, remembering the
                # previous one so it can be reinstated afterwards.
                original_callback = getattr(self.agent, 'progress_callback', None)

                if hasattr(self.agent, 'set_progress_callback'):
                    self.agent.set_progress_callback(standalone_progress_callback)
                elif hasattr(self.agent, 'progress_callback'):
                    self.agent.progress_callback = standalone_progress_callback

                try:
                    # Execute agent
                    result = loop.run_until_complete(
                        self.agent.a_run(query=query, session_id=session_id)
                    )
                finally:
                    # Fix: restore the original callback even when a_run
                    # raises, so a failed request cannot leave our temporary
                    # callback installed on the shared agent.
                    if hasattr(self.agent, 'set_progress_callback'):
                        self.agent.set_progress_callback(original_callback)
                    elif hasattr(self.agent, 'progress_callback'):
                        self.agent.progress_callback = original_callback

                # Create enhanced response
                response_data = {
                    'success': True,
                    'result': result,
                    'session_id': session_id,
                    'agent_id': self.agent_id,
                    'server_type': 'standalone',
                    'timestamp': time.time()
                }

                if include_progress:
                    response_data.update({
                        'progress_events': progress_events,
                        'enhanced_progress': enhanced_progress,
                        'final_summary': progress_tracker.get_final_summary()
                    })
                self._send_json_response(response_data)

            finally:
                loop.close()

        except Exception as e:
            self._send_error_response(500, f"Execution error: {str(e)}")
            import traceback
            print(traceback.format_exc())

    def _handle_reset_request(self):
        """Handle enhanced reset requests via clear_context()/reset() if the agent supports either."""
        try:
            success = False
            message = "Reset not supported"

            if hasattr(self.agent, 'clear_context'):
                self.agent.clear_context()
                success = True
                message = "Context reset successfully"
            elif hasattr(self.agent, 'reset'):
                self.agent.reset()
                success = True
                message = "Agent reset successfully"

            response_data = {
                'success': success,
                'message': message,
                'agent_id': self.agent_id,
                'timestamp': time.time()
            }

            self._send_json_response(response_data)

        except Exception as e:
            self._send_error_response(500, f"Reset error: {str(e)}")

    def _send_json_response(self, data: dict):
        """Send a 200 JSON response with permissive CORS headers."""
        response_body = json.dumps(data, cls=CustomJSONEncoder).encode('utf-8')

        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.send_header('Content-Length', str(len(response_body)))
        self.send_header('Access-Control-Allow-Origin', '*')
        self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
        self.send_header('Access-Control-Allow-Headers', 'Content-Type')
        self.end_headers()
        self.wfile.write(response_body)

    def _send_error_response(self, code: int, message: str):
        """Send a JSON error response with the given HTTP status code."""
        error_data = {'success': False, 'error': message, 'code': code}
        response_body = json.dumps(error_data).encode('utf-8')

        self.send_response(code)
        self.send_header('Content-type', 'application/json')
        self.send_header('Content-Length', str(len(response_body)))
        self.end_headers()
        self.wfile.write(response_body)

    def _send_404(self):
        """Send 404 response."""
        self._send_error_response(404, "Not Found")

    def log_message(self, format, *args):
        """Override to reduce logging noise (suppress default per-request log line)."""
        pass

    def do_OPTIONS(self):
        """Handle preflight CORS requests."""
        self.send_response(200)
        self.send_header('Access-Control-Allow-Origin', '*')
        self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
        self.send_header('Access-Control-Allow-Headers', 'Content-Type')
        self.end_headers()
do_GET()

Handle GET requests for enhanced UI and status.

Source code in toolboxv2/mods/isaa/module.py
105
106
107
108
109
110
111
112
113
114
def do_GET(self):
    """Dispatch GET requests to the UI page, the status endpoint, or 404."""
    route = urlparse(self.path).path
    if route in ('/', '/ui'):
        self._serve_enhanced_ui()
    elif route in ('/api/status', '/api/agent_ui/status', '/status'):
        self._serve_status()
    else:
        self._send_404()
do_OPTIONS()

Handle preflight CORS requests.

Source code in toolboxv2/mods/isaa/module.py
311
312
313
314
315
316
317
def do_OPTIONS(self):
    """Answer CORS preflight requests with permissive allow-* headers."""
    cors_headers = (
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Methods', 'GET, POST, OPTIONS'),
        ('Access-Control-Allow-Headers', 'Content-Type'),
    )
    self.send_response(200)
    for name, value in cors_headers:
        self.send_header(name, value)
    self.end_headers()
do_POST()

Handle POST requests for enhanced API endpoints.

Source code in toolboxv2/mods/isaa/module.py
116
117
118
119
120
121
122
123
124
125
def do_POST(self):
    """Dispatch POST requests to the run/reset handlers, else 404."""
    route = urlparse(self.path).path
    dispatch = {
        '/api/run': self._handle_run_request,
        '/api/agent_ui/run_agent': self._handle_run_request,
        '/api/reset': self._handle_reset_request,
        '/api/agent_ui/reset_context': self._handle_reset_request,
    }
    dispatch.get(route, self._send_404)()
log_message(format, *args)

Override to reduce logging noise.

Source code in toolboxv2/mods/isaa/module.py
307
308
309
def log_message(self, format, *args):
    """Suppress BaseHTTPRequestHandler's default per-request stderr logging."""
    return None
EnhancedProgressTracker

Enhanced progress tracker for detailed UI updates.

Source code in toolboxv2/mods/isaa/module.py
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
class EnhancedProgressTracker:
    """Enhanced progress tracker for detailed UI updates.

    Translates a ProgressEvent into a plain dict of UI-facing sections
    (outline, activity, meta_tool, system, graph), filling in defaults
    for any fields the event does not provide.
    """

    def __init__(self):
        # Per-session scratch state and timestamps of the last pushed updates.
        self.session_state = {}
        self.last_outline_update = None
        self.last_activity_update = None

    def extract_progress_data(self, event: ProgressEvent) -> dict[str, Any]:
        """Extract comprehensive progress data from event.

        Each section is included only when the event exposes a matching
        ``<name>_data`` attribute or a ``<name>`` key in its metadata.
        """
        # (event attribute, metadata/output key, [(field, default), ...])
        # Built per call so mutable defaults (lists) are fresh each time.
        sections = [
            ('outline_data', 'outline', [
                ('current_step', 'Unknown'),
                ('total_steps', 0),
                ('step_name', 'Processing'),
                ('progress_percentage', 0),
                ('substeps', []),
                ('estimated_completion', None),
            ]),
            ('activity_data', 'activity', [
                ('current_action', 'Processing'),
                ('action_details', ''),
                ('start_time', None),
                ('elapsed_time', None),
                ('expected_duration', None),
            ]),
            ('meta_tool_data', 'meta_tool', [
                ('tool_name', 'Unknown'),
                ('tool_status', 'active'),
                ('tool_input', ''),
                ('tool_output', ''),
                ('execution_time', None),
            ]),
            ('system_data', 'system', [
                ('memory_usage', 0),
                ('cpu_usage', 0),
                ('active_threads', 1),
                ('queue_size', 0),
            ]),
            ('graph_data', 'graph', [
                ('current_node', 'Unknown'),
                ('completed_nodes', []),
                ('remaining_nodes', []),
                ('node_connections', []),
                ('execution_path', []),
            ]),
        ]

        progress_data = {}
        for attr_name, section_key, fields in sections:
            if hasattr(event, attr_name) or section_key in event.metadata:
                info = getattr(event, attr_name, event.metadata.get(section_key, {}))
                progress_data[section_key] = {
                    field: info.get(field, default) for field, default in fields
                }
        return progress_data
extract_progress_data(event)

Extract comprehensive progress data from event.

Source code in toolboxv2/mods/isaa/module.py
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
def extract_progress_data(self, event: ProgressEvent) -> dict[str, Any]:
    """Extract comprehensive progress data from event.

    Each section is included only when the event exposes a matching
    ``<name>_data`` attribute or a ``<name>`` key in its metadata.
    """
    # (event attribute, metadata/output key, [(field, default), ...])
    # Built per call so mutable defaults (lists) are fresh each time.
    sections = [
        ('outline_data', 'outline', [
            ('current_step', 'Unknown'),
            ('total_steps', 0),
            ('step_name', 'Processing'),
            ('progress_percentage', 0),
            ('substeps', []),
            ('estimated_completion', None),
        ]),
        ('activity_data', 'activity', [
            ('current_action', 'Processing'),
            ('action_details', ''),
            ('start_time', None),
            ('elapsed_time', None),
            ('expected_duration', None),
        ]),
        ('meta_tool_data', 'meta_tool', [
            ('tool_name', 'Unknown'),
            ('tool_status', 'active'),
            ('tool_input', ''),
            ('tool_output', ''),
            ('execution_time', None),
        ]),
        ('system_data', 'system', [
            ('memory_usage', 0),
            ('cpu_usage', 0),
            ('active_threads', 1),
            ('queue_size', 0),
        ]),
        ('graph_data', 'graph', [
            ('current_node', 'Unknown'),
            ('completed_nodes', []),
            ('remaining_nodes', []),
            ('node_connections', []),
            ('execution_path', []),
        ]),
    ]

    progress_data = {}
    for attr_name, section_key, fields in sections:
        if hasattr(event, attr_name) or section_key in event.metadata:
            info = getattr(event, attr_name, event.metadata.get(section_key, {}))
            progress_data[section_key] = {
                field: info.get(field, default) for field, default in fields
            }
    return progress_data
Tools

Bases: MainTool, FileHandler

Source code in toolboxv2/mods/isaa/module.py
 388
 389
 390
 391
 392
 393
 394
 395
 396
 397
 398
 399
 400
 401
 402
 403
 404
 405
 406
 407
 408
 409
 410
 411
 412
 413
 414
 415
 416
 417
 418
 419
 420
 421
 422
 423
 424
 425
 426
 427
 428
 429
 430
 431
 432
 433
 434
 435
 436
 437
 438
 439
 440
 441
 442
 443
 444
 445
 446
 447
 448
 449
 450
 451
 452
 453
 454
 455
 456
 457
 458
 459
 460
 461
 462
 463
 464
 465
 466
 467
 468
 469
 470
 471
 472
 473
 474
 475
 476
 477
 478
 479
 480
 481
 482
 483
 484
 485
 486
 487
 488
 489
 490
 491
 492
 493
 494
 495
 496
 497
 498
 499
 500
 501
 502
 503
 504
 505
 506
 507
 508
 509
 510
 511
 512
 513
 514
 515
 516
 517
 518
 519
 520
 521
 522
 523
 524
 525
 526
 527
 528
 529
 530
 531
 532
 533
 534
 535
 536
 537
 538
 539
 540
 541
 542
 543
 544
 545
 546
 547
 548
 549
 550
 551
 552
 553
 554
 555
 556
 557
 558
 559
 560
 561
 562
 563
 564
 565
 566
 567
 568
 569
 570
 571
 572
 573
 574
 575
 576
 577
 578
 579
 580
 581
 582
 583
 584
 585
 586
 587
 588
 589
 590
 591
 592
 593
 594
 595
 596
 597
 598
 599
 600
 601
 602
 603
 604
 605
 606
 607
 608
 609
 610
 611
 612
 613
 614
 615
 616
 617
 618
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532
3533
3534
3535
3536
3537
3538
3539
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718
class Tools(MainTool, FileHandler):

    def __init__(self, app=None):
        """Set up the ISAA tool module.

        Builds the default model/config table, persistence paths, the public
        tool registry, then runs FileHandler/MainTool initialization
        (self.on_start is passed as the MainTool load hook) and finally loads
        any previously saved config from the file handler.

        Args:
            app: Toolbox App instance; resolved via get_app("isaa-mod") when None.
        """
        self.run_callback = None
        # self.coding_projects: dict[str, ProjectManager] = {} # Assuming ProjectManager is defined elsewhere or removed
        if app is None:
            app = get_app("isaa-mod")
        self.version = version
        self.name = "isaa"
        self.Name = "isaa"
        self.color = "VIOLET2"
        # Default model assignments; env vars take precedence over the
        # hard-coded ollama/gemini fallbacks.
        self.config = {'controller-init': False,
                       'agents-name-list': [], # TODO Remain ComplexModel FastModel BlitzModel, AudioModel, (ImageModel[i/o], VideoModel[i/o]), SummaryModel
                       "FASTMODEL": os.getenv("FASTMODEL", "ollama/llama3.1"),
                       "AUDIOMODEL": os.getenv("AUDIOMODEL", "groq/whisper-large-v3-turbo"),
                       "BLITZMODEL": os.getenv("BLITZMODEL", "ollama/llama3.1"),
                       "COMPLEXMODEL": os.getenv("COMPLEXMODEL", "ollama/llama3.1"),
                       "SUMMARYMODEL": os.getenv("SUMMARYMODEL", "ollama/llama3.1"),
                       "IMAGEMODEL": os.getenv("IMAGEMODEL", "ollama/llama3.1"),
                       "DEFAULTMODELEMBEDDING": os.getenv("DEFAULTMODELEMBEDDING", "gemini/text-embedding-004"),
                       }
        self.per_data = {}
        self.agent_data: dict[str, dict] = {}  # Will store AgentConfig dicts
        # File-handler storage keys (fixed-width identifiers).
        self.keys = {
            "KEY": "key~~~~~~~",
            "Config": "config~~~~"
        }
        self.initstate = {}

        # Namespace persistence paths by toolID so parallel instances don't collide.
        extra_path = ""
        if self.toolID:  # MainTool attribute
            extra_path = f"/{self.toolID}"
        self.observation_term_mem_file = f"{app.data_dir}/Memory{extra_path}/observationMemory/"
        self.config['controller_file'] = f"{app.data_dir}{extra_path}/controller.json"
        self.mas_text_summaries_dict = FileCache(folder=f"{app.data_dir}/Memory{extra_path}/summaries/")
        # Public tool registry exposed through MainTool.
        self.tools = {
            "name": "isaa",
            "Version": self.show_version,
            "mini_task_completion": self.mini_task_completion,
            "run_agent": self.run_agent,
            "save_to_mem": self.save_to_mem_sync,
            "get_agent": self.get_agent,
            "format_class": self.format_class,  # Now async
            "get_memory": self.get_memory,
            "rget_mode": lambda mode: self.controller.rget(mode),
        }
        self.tools_interfaces: dict[str, ToolsInterface] = {}
        self.working_directory = os.getenv('ISAA_WORKING_PATH', os.getcwd())
        self.print_stream = stram_print
        self.global_stream_override = False  # Handled by FlowAgentBuilder
        self.lang_chain_tools_dict: dict[str, Any] = {}  # Store actual tool objects for wrapping

        # NOTE(review): despite the annotation this holds a path *string* here;
        # presumably materialized into an AISemanticMemory later — confirm.
        self.agent_memory: AISemanticMemory = f"{app.id}{extra_path}/Memory"  # Path for AISemanticMemory
        self.controller = ControllerManager({})
        self.summarization_mode = 1
        self.summarization_limiter = 102000
        self.speak = lambda x, *args, **kwargs: x  # Placeholder
        self.scripts = Scripts(f"{app.data_dir}{extra_path}/ScriptFile")

        self.default_setter = None  # For agent builder customization
        self.initialized = False

        FileHandler.__init__(self, f"isaa{extra_path.replace('/', '-')}.config", app.id if app else __name__)
        MainTool.__init__(self, load=self.on_start, v=self.version, tool=self.tools,
                          name=self.name, logs=None, color=self.color, on_exit=self.on_exit)

        from .extras.web_search import web_search
        self.web_search = web_search
        self.shell_tool_function = shell_tool_function
        self.tools["shell"] = shell_tool_function

        self.print(f"Start {self.spec}.isaa")
        with Spinner(message="Starting module", symbols='c'):
            self.load_file_handler()
            config_fh = self.get_file_handler(self.keys["Config"])
            if config_fh is not None:
                # Stored config may be a JSON string or an already-parsed dict.
                if isinstance(config_fh, str):
                    try:
                        config_fh = json.loads(config_fh)
                    except json.JSONDecodeError:
                        self.print(f"Warning: Could not parse config from file handler: {config_fh[:100]}...")
                        config_fh = {}

                if isinstance(config_fh, dict):
                    # Merge, prioritizing existing self.config for defaults not in file
                    loaded_config = config_fh
                    for key, value in self.config.items():
                        if key not in loaded_config:
                            loaded_config[key] = value
                    self.config = loaded_config

            if self.spec == 'app':  # MainTool attribute
                self.load_keys_from_env()

            # Ensure directories exist
            Path(f"{get_app('isaa-initIsaa').data_dir}/Agents/").mkdir(parents=True, exist_ok=True)
            Path(f"{get_app('isaa-initIsaa').data_dir}/Memory/").mkdir(parents=True, exist_ok=True)

        #initialize_isaa_webui_module(self.app, self)
        #self.print("ISAA module started. fallback")

    def get_augment(self):
        """Snapshot the serializable ISAA state: agent configs plus custom scripts."""
        agents_snapshot = self.serialize_all()
        scripts_json = json.dumps(self.scripts.scripts)
        return {"Agents": agents_snapshot, "customFunctions": scripts_json}

    async def init_from_augment(self, augment, agent_name: str = 'self'):
        """Restore ISAA state from an augment snapshot (builder-based system).

        Args:
            augment: dict that may contain 'Agents', 'customFunctions', 'tools'.
            agent_name: plain name string, or a FlowAgentBuilder whose
                configured name is used instead.
        """
        # Normalize agent_name: accept a string or a builder carrying a config.
        if isinstance(agent_name, str):
            pass
        elif hasattr(agent_name, 'config'):
            agent_name = agent_name.config.name
        else:
            raise ValueError(f"Invalid agent_name type: {type(agent_name)}")

        # Agent configurations.
        if "Agents" in augment:
            self.deserialize_all(augment['Agents'])
            self.print("Agent configurations loaded.")

        # Custom scripts; accept either a JSON string or a ready dict.
        if "customFunctions" in augment:
            raw_scripts = augment['customFunctions']
            if isinstance(raw_scripts, str):
                raw_scripts = json.loads(raw_scripts)
            if raw_scripts:
                self.scripts.scripts = raw_scripts
                self.print("Custom functions loaded")

        # Tools themselves are attached later, during agent building.
        if "tools" in augment:
            self.print("Tool configurations noted - will be applied during agent building")

    async def init_tools(self, tools_config: dict, agent_builder: FlowAgentBuilder):
        """Register LangChain tools named in *tools_config* onto the builder.

        Only the 'lagChinTools' entry is honoured here. HuggingFace tools are
        LangChain tools as well; AIPluginTool URLs would need a dedicated ADK
        wrapper and are intentionally not handled.
        """
        requested_names = list(set(tools_config.get('lagChinTools', [])))

        for tool_name in requested_names:
            try:
                # load_tools can yield several instances per name; an LLM is
                # not always needed just to obtain the tool definitions.
                for tool in load_tools([tool_name], llm=None):
                    if not (hasattr(tool, 'run') and callable(tool.run)):
                        continue
                    # Wrap the tool's run callable; the builder/ADK layer is
                    # expected to infer the argument schema.
                    agent_builder.add_tool(tool.run, name=tool.name,
                                           description=tool.description)
                    self.print(f"Added LangChain tool '{tool.name}' to builder.")
                    self.lang_chain_tools_dict[tool.name] = tool  # keep for reference
            except Exception as e:
                self.print(f"Failed to load/add LangChain tool '{tool_name}': {e}")

    def serialize_all(self):
        """Return a deep copy of all stored AgentConfig dicts, keyed by agent name."""
        # Deep copy so callers cannot mutate the registry through the snapshot.
        return copy.deepcopy(self.agent_data)

    def deserialize_all(self, data: dict[str, dict]):
        """Merge {agent_name: builder-config dict} data into the registry.

        Cached 'agent-instance-<name>' entries are dropped so the affected
        agents get rebuilt from the new configuration on next use.
        """
        self.agent_data.update(data)
        for updated_name in data:
            self.config.pop(f'agent-instance-{updated_name}', None)

    async def init_isaa(self, name='self', build=False, **kwargs):
        """One-time ISAA initialization.

        Returns the agent builder for *name* when build=True, otherwise the
        built agent instance. Subsequent calls skip the setup work.
        """
        if self.initialized:
            self.print(f"Already initialized. Getting agent/builder: {name}")
            if build:
                return self.get_agent_builder(name)
            return await self.get_agent(name)

        self.initialized = True
        sys.setrecursionlimit(1500)
        self.load_keys_from_env()

        # Background loading of persisted scripts.
        self.scripts.load_scripts()

        with Spinner(message="Building Controller", symbols='c'):
            self.controller.init(self.config['controller_file'])
        self.config["controller-init"] = True

        if build:
            return self.get_agent_builder(name)
        return await self.get_agent(name)

    def show_version(self):
        """Print the current module version and return it."""
        current = self.version
        self.print("Version: ", current)
        return current

    def on_start(self):
        """Module startup hook: bring up the web UI and warm memory off-thread."""
        initialize_isaa_webui_module(self.app, self)
        # Memory loading may be slow; keep it off the startup path.
        threading.Thread(target=self.load_to_mem_sync, daemon=True).start()
        self.print("ISAA module started.")

    def load_keys_from_env(self):
        """Refresh selected config entries from environment variables.

        NOTE(review): the prefix check only matches keys starting with
        "DEFAULTMODEL" (in the default config that is just
        DEFAULTMODELEMBEDDING); FASTMODEL/COMPLEXMODEL etc. are NOT refreshed
        here — confirm whether that is intended.
        """
        for key in self.config:
            if key.startswith("DEFAULTMODEL"):
                self.config[key] = os.getenv(key, self.config[key])
        # May store None when the VAULTS env var is unset.
        self.config['VAULTS'] = os.getenv("VAULTS")

    def on_exit(self):
        """Shutdown hook: close agents, persist scripts, memory, controller, config.

        Fix: get_agent() caches a *single* FlowAgent under each
        'agent-instance-<name>' key, but the old check required a list of
        FlowAgents, so cached agents were never closed. Both shapes are now
        accepted.
        """
        self.app.run_bg_task_advanced(self.cleanup_tools_interfaces)

        # Close any live FlowAgent instances (single instance or list).
        for key, value in self.config.items():
            if not key.startswith('agent-instance-') or not value:
                continue
            candidates = value if isinstance(value, list) else [value]
            agents = [a for a in candidates if isinstance(a, FlowAgent)]
            if agents:
                self.app.run_bg_task_advanced(
                    asyncio.gather(*[a.close() for a in agents]))

        self.scripts.save_scripts()
        # Sync wrapper for save_to_mem; may be slow, keep it off the exit path.
        threading.Thread(target=self.save_to_mem_sync, daemon=True).start()

        # Persist controller state only if it was initialized.
        if self.config.get("controller-init"):
            self.controller.save(self.config['controller_file'])

        # Strip non-serializable entries (live agent instances, langchain
        # model objects) before persisting the config via the file handler.
        clean_config = {k: v for k, v in self.config.items()
                        if not k.startswith('agent-instance-')
                        and not k.startswith('LLM-model-')}
        self.add_to_save_file_handler(self.keys["Config"], json.dumps(clean_config))

        # Flush all pending file-handler data.
        self.save_file_handler()

    def save_to_mem_sync(self):
        """Best-effort global memory save.

        FlowAgent has no per-agent save_memory; if the AISemanticMemory
        instance exposes a save_all_memories hook, invoke it, otherwise no-op.
        """
        memory = self.get_memory()
        if hasattr(memory, 'save_all_memories'):  # optional hook
            memory.save_all_memories(f"{get_app().data_dir}/Memory/")
        self.print("Memory saving process initiated")

    def load_to_mem_sync(self):
        """Best-effort global memory load; counterpart of save_to_mem_sync().

        If the AISemanticMemory instance exposes a load_all_memories hook,
        invoke it, otherwise no-op.
        """
        memory = self.get_memory()
        if hasattr(memory, 'load_all_memories'):  # optional hook
            memory.load_all_memories(f"{get_app().data_dir}/Memory/")
        self.print("Memory loading process initiated")

    def get_agent_builder(self, name="self", extra_tools=None) -> FlowAgentBuilder:
        """Create and configure a FlowAgentBuilder for *name*.

        Builds a default AgentConfig, overlays a previously saved config from
        <data_dir>/Agents/<name>/agent.json if present, attaches the ISAA core
        tools (memory search/save, web search, shell, scripting) plus a
        keyword-filtered subset of ToolsInterface tools, sets a budget, and
        persists agent metadata. Returns the ready-to-build builder.

        Args:
            name: agent name; the literal string 'None' is coerced to "self".
            extra_tools: ToolsInterface tool names to force-include.
        """
        if name == 'None':
            name = "self"

        if extra_tools is None:
            extra_tools = []

        self.print(f"Creating FlowAgentBuilder: {name}")

        # Default configuration; '<NAME>MODEL' config keys override the
        # global FASTMODEL/COMPLEXMODEL choices per agent.
        config = AgentConfig(
            name=name,
            fast_llm_model=self.config.get(f'{name.upper()}MODEL', self.config['FASTMODEL']),
            complex_llm_model=self.config.get(f'{name.upper()}MODEL', self.config['COMPLEXMODEL']),
            system_message="You are a production-ready autonomous agent.",
            temperature=0.7,
            max_tokens_output=2048,
            max_tokens_input=32768,
            use_fast_response=True,
            max_parallel_tasks=3,
            verbose_logging=False
        )

        builder = FlowAgentBuilder(config=config)
        builder._isaa_ref = self  # Store ISAA reference

        # A saved config file fully replaces the default builder.
        agent_config_path = Path(f"{get_app().data_dir}/Agents/{name}/agent.json")
        if agent_config_path.exists():
            try:
                builder = FlowAgentBuilder.from_config_file(str(agent_config_path))
                builder._isaa_ref = self
                self.print(f"Loaded existing configuration for builder {name}")
            except Exception as e:
                self.print(f"Failed to load config for {name}: {e}. Using defaults.")

        # Apply global settings
        if self.global_stream_override:
            builder.verbose(True)

        # Apply custom setter if available
        if self.default_setter:
            builder = self.default_setter(builder, name)

        # Initialize ToolsInterface registry lazily (older instances may lack it).
        if not hasattr(self, 'tools_interfaces'):
            self.tools_interfaces = {}

        # Create or get existing ToolsInterface for this agent
        if name not in self.tools_interfaces:
            try:
                # Per-agent session dir; auto_remove=False keeps session data.
                tools_interface = ToolsInterface(
                    session_dir=str(Path(get_app().data_dir) / "Agents" / name / "tools_session"),
                    auto_remove=False,  # Keep session data for agents
                    variables={
                        'agent_name': name,
                        'isaa_instance': self
                    },
                    variable_manager=getattr(self, 'variable_manager', None),
                )

                self.tools_interfaces[name] = tools_interface
                self.print(f"Created ToolsInterface for agent: {name}")

            except Exception as e:
                self.print(f"Failed to create ToolsInterface for {name}: {e}")
                # None marks a failed creation; checked before tool wiring below.
                self.tools_interfaces[name] = None

        tools_interface = self.tools_interfaces[name]

        # NOTE(review): defined but not registered on the builder within this
        # method — confirm whether it is added elsewhere or dead code.
        async def run_isaa_agent_tool(target_agent_name: str, instructions: str, **kwargs_):
            if not instructions:
                return "No instructions provided."
            # Strip one pair of matching surrounding quotes from the name.
            if target_agent_name.startswith('"') and target_agent_name.endswith('"') or target_agent_name.startswith(
                "'") and target_agent_name.endswith("'"):
                target_agent_name = target_agent_name[1:-1]
            return await self.run_agent(target_agent_name, text=instructions, **kwargs_)

        async def memory_search_tool(
            query: str,
            search_mode: str | None = "balanced",
            context_name: str | None = None
        ) -> str:
            """Memory search with configurable precision"""
            mem_instance = self.get_memory()
            # context_name is a comma-separated list of memory names.
            memory_names_list = [name.strip() for name in context_name.split(',')] if context_name else None

            # Precision presets; unknown modes fall back to the balanced params.
            search_params = {
                "wide": {"k": 7, "min_similarity": 0.1, "cross_ref_depth": 3, "max_cross_refs": 4, "max_sentences": 8},
                "narrow": {"k": 2, "min_similarity": 0.75, "cross_ref_depth": 1, "max_cross_refs": 1,
                           "max_sentences": 3},
                "balanced": {"k": 3, "min_similarity": 0.2, "cross_ref_depth": 2, "max_cross_refs": 2,
                             "max_sentences": 5}
            }.get(search_mode,
                  {"k": 3, "min_similarity": 0.2, "cross_ref_depth": 2, "max_cross_refs": 2, "max_sentences": 5})

            return await mem_instance.query(
                query=query, memory_names=memory_names_list,
                query_params=search_params, to_str=True
            )

        # Default context is this agent's name (bound at builder-creation time).
        async def save_to_memory_tool(data_to_save: str, context_name: str = name):
            mem_instance = self.get_memory()
            result = await mem_instance.add_data(context_name, str(data_to_save), direct=True)
            return 'Data added to memory.' if result else 'Error adding data to memory.'

        # Add ISAA core tools
        builder.add_tool(memory_search_tool, "memorySearch", "Search ISAA's semantic memory")
        builder.add_tool(save_to_memory_tool, "saveDataToMemory", "Save data to ISAA's semantic memory")
        builder.add_tool(self.web_search, "searchWeb", "Search the web for information")
        builder.add_tool(self.shell_tool_function, "shell", f"Run shell command in {detect_shell()}")

        # Scripting tools
        # Enhanced tool descriptions for agent understanding
        builder.add_tool(
            self.scripts.run_script,
            "runScript",
            """POWER TOOL: Execute saved scripts to perform complex operations beyond basic functions.

            USE WHEN: You need file processing, data analysis, web scraping, API calls, system operations, mathematical computations, or any task requiring libraries/complex logic. or built-in tools not avalabel

            DON'T USE: For simple text operations, basic math, or tasks you can do with built-in tools. or built-in tools avalabel

            Args: name (required), args (optional - space-separated arguments for the script)
            Example: runScript('web_scraper', 'https://example.com json')"""
        )

        builder.add_tool(
            self.scripts.get_scripts_list,
            "listScripts",
            """View your extended capabilities. Shows all available scripts that enhance your abilities beyond built-in functions. Use this to discover what powerful operations you can perform."""
        )

        builder.add_tool(
            self.scripts.create_script,
            "createScript",
            """CAPABILITY ENHANCER: Create scripts to permanently extend your abilities.

            Python scripts can use external libraries via 'uv' dependency management.
            Shell scripts work cross-platform for system operations.

            WHEN TO CREATE: When you need to repeat complex operations, use external libraries, or perform system-level tasks.

            Args: name, description, content, script_type ('py' or 'sh'), dependencies (optional - for Python: 'requests pandas numpy' format)

            Example: createScript('data_analyzer', 'Analyze CSV data', '...code...', 'py', 'pandas matplotlib')"""
        )

        builder.add_tool(
            self.scripts.remove_script,
            "deleteScript",
            """Remove a script capability. Use when a script is no longer needed or needs to be replaced. Args: name"""
        )

        # Add ToolsInterface tools dynamically (skipped when creation failed).
        if tools_interface:
            try:
                # Get all tools from ToolsInterface
                interface_tools = tools_interface.get_tools()

                # Category -> tool-name map used for keyword-based selection.
                tool_categories = {
                    'code': ['execute_python', 'execute_rust', 'install_package'],
                    'file': ['write_file', 'replace_in_file', 'read_file', 'list_directory', 'create_directory'],
                    'session': ['get_execution_history', 'clear_session', 'get_variables'],
                    'config': ['set_base_directory', 'set_current_file']
                }

                # Categories are chosen from keywords in the agent name.
                include_categories = set()
                name_lower = name.lower()

                # Code execution for development/coding agents
                if any(keyword in name_lower for keyword in ["dev", "code", "program", "script", "python", "rust", "worker"]):
                    include_categories.update(['code', 'file', 'session', 'config'])

                # Web tools for web-focused agents
                if any(keyword in name_lower for keyword in ["web", "browser", "scrape", "crawl", "extract"]):
                    include_categories.update(['file', 'session'])

                # File tools for file management agents
                if any(keyword in name_lower for keyword in ["file", "fs", "document", "write", "read"]):
                    include_categories.update(['file', 'session', 'config'])

                # Default: add core tools for general agents
                if not include_categories or name == "self":
                    include_categories.update(['code', 'file', 'session', 'config'])

                # Add selected tools
                tools_added = 0
                for tool_func, tool_name, tool_description in interface_tools:
                    # extra_tools entries are always included.
                    should_include = tool_name in extra_tools

                    if not should_include:
                        for category, tool_names in tool_categories.items():
                            if category in include_categories and tool_name in tool_names:
                                should_include = True
                                break

                    # Always include session management tools
                    if tool_name in ['get_execution_history', 'get_variables']:
                        should_include = True

                    if should_include:
                        try:
                            builder.add_tool(tool_func, tool_name, tool_description)
                            tools_added += 1
                        except Exception as e:
                            self.print(f"Failed to add tool {tool_name}: {e}")

                self.print(f"Added {tools_added} ToolsInterface tools to agent {name}")

            except Exception as e:
                self.print(f"Error adding ToolsInterface tools to {name}: {e}")

        # Configure cost tracking
        builder.with_budget_manager(max_cost=100.0)

        # Persist agent metadata next to the agent's config/session data.
        try:
            agent_dir = Path(f"{get_app().data_dir}/Agents/{name}")
            agent_dir.mkdir(parents=True, exist_ok=True)

            # Save agent metadata
            metadata = {
                'name': name,
                'created_at': time.time(),
                'tools_interface_available': tools_interface is not None,
                'session_dir': str(agent_dir / "tools_session")
            }

            metadata_file = agent_dir / "metadata.json"
            with open(metadata_file, 'w') as f:
                json.dump(metadata, f, indent=2)

        except Exception as e:
            self.print(f"Failed to save agent metadata for {name}: {e}")

        return builder


    def get_tools_interface(self, agent_name: str = "self") -> ToolsInterface | None:
        """Look up the ToolsInterface registered for *agent_name*.

        Returns:
            The ToolsInterface instance, or None when the registry does not
            exist yet or has no entry for this agent.
        """
        registry = getattr(self, 'tools_interfaces', None)
        if registry is None:
            return None
        return registry.get(agent_name)

    async def configure_tools_interface(self, agent_name: str, **kwargs) -> bool:
        """Apply configuration options to an agent's ToolsInterface.

        Supported kwargs: base_directory, current_file, variables.

        Returns:
            True on success, False when the interface is missing or any
            configuration step fails.
        """
        interface = self.get_tools_interface(agent_name)
        if not interface:
            self.print(f"No ToolsInterface found for agent {agent_name}")
            return False

        try:
            if 'base_directory' in kwargs:
                await interface.set_base_directory(kwargs['base_directory'])
            if 'current_file' in kwargs:
                await interface.set_current_file(kwargs['current_file'])
            if 'variables' in kwargs:
                # Inject variables straight into the embedded IPython namespace.
                interface.ipython.user_ns.update(kwargs['variables'])

            self.print(f"Configured ToolsInterface for agent {agent_name}")
            return True
        except Exception as e:
            self.print(f"Failed to configure ToolsInterface for {agent_name}: {e}")
            return False

    async def cleanup_tools_interfaces(self):
        """Tear down every registered ToolsInterface and clear the registry."""
        if not hasattr(self, 'tools_interfaces'):
            return

        try:
            for name, interface in self.tools_interfaces.items():
                if not interface:
                    continue
                try:
                    # ToolsInterface is an async context manager; exit it manually.
                    await interface.__aexit__(None, None, None)
                except Exception as e:
                    self.print(f"Error cleaning up ToolsInterface for {name}: {e}")
            self.tools_interfaces.clear()
            self.print("Cleaned up all ToolsInterface instances")
        except Exception as e:
            self.print(f"Error during ToolsInterface cleanup: {e}")

    async def register_agent(self, agent_builder: FlowAgentBuilder):
        """Persist a builder's configuration and register the agent for lazy building."""
        agent_name = agent_builder.config.name

        # Drop any cached instance so the next get_agent() rebuilds it.
        instance_key = f'agent-instance-{agent_name}'
        if instance_key in self.config:
            self.print(f"Agent '{agent_name}' instance already exists. Overwriting config and rebuilding on next get.")
            self.config.pop(instance_key, None)

        # Persist the builder configuration to disk.
        config_path = Path(f"{get_app().data_dir}/Agents/{agent_name}/agent.json")
        agent_builder.save_config(str(config_path), format='json')
        self.print(f"Saved FlowAgentBuilder config for '{agent_name}' to {config_path}")

        # Keep a serializable copy for later rebuilds.
        self.agent_data[agent_name] = agent_builder.config.model_dump()

        # Track the agent name exactly once.
        name_list = self.config.setdefault("agents-name-list", [])
        if agent_name not in name_list:
            name_list.append(agent_name)

        self.print(f"FlowAgent '{agent_name}' configuration registered. Will be built on first use.")
        row_agent_builder_sto[agent_name] = agent_builder  # cache the builder

    async def get_agent(self, agent_name="Normal", model_override: str | None = None) -> FlowAgent:
        """Return a (possibly cached) FlowAgent instance for *agent_name*.

        Resolution order: cached built instance in self.config -> cached builder
        in row_agent_builder_sto -> stored serialized config in self.agent_data ->
        freshly created default builder. The built instance is cached back into
        self.config under 'agent-instance-<name>'.

        Args:
            agent_name: Registered agent name; defaults to "Normal".
            model_override: If given and different from the cached instance's
                fast model, the cached instance is discarded and rebuilt with
                this model used for both fast and complex roles.
        """
        if "agents-name-list" not in self.config:
            self.config["agents-name-list"] = []

        instance_key = f'agent-instance-{agent_name}'
        if instance_key in self.config:
            agent_instance = self.config[instance_key]
            if model_override and agent_instance.amd.fast_llm_model != model_override:
                self.print(f"Model override for {agent_name}: {model_override}. Rebuilding.")
                self.config.pop(instance_key, None)
            else:
                self.print(f"Returning existing FlowAgent instance: {agent_name}")
                return agent_instance

        builder_to_use = None

        # Try to get cached builder first
        if agent_name in row_agent_builder_sto:
            builder_to_use = row_agent_builder_sto[agent_name]
            self.print(f"Using cached builder for {agent_name}")

        # Try to load from stored config
        elif agent_name in self.agent_data:
            self.print(f"Loading configuration for FlowAgent: {agent_name}")
            try:
                config = AgentConfig(**self.agent_data[agent_name])
                builder_to_use = FlowAgentBuilder(config=config)
            except Exception as e:
                self.print(f"Error loading config for {agent_name}: {e}. Falling back to default.")

        # Create default builder if none found
        if builder_to_use is None:
            self.print(f"No existing config for {agent_name}. Creating default builder.")
            builder_to_use = self.get_agent_builder(agent_name)

        # Apply overrides and ensure correct name
        builder_to_use._isaa_ref = self
        if model_override:
            builder_to_use.with_models(model_override, model_override)

        if builder_to_use.config.name != agent_name:
            builder_to_use.with_name(agent_name)

        self.print(
            f"Building FlowAgent: {agent_name} with models {builder_to_use.config.fast_llm_model} - {builder_to_use.config.complex_llm_model}")

        # Build the agent
        agent_instance: FlowAgent = await builder_to_use.build()

        # The "self" agent additionally initializes context awareness in the background.
        if agent_instance.amd.name == "self":
            self.app.run_bg_task_advanced(agent_instance.initialize_context_awareness)

        # Share the agent's variable manager with its tools interface, if one exists.
        if interface := self.get_tools_interface(agent_name):
            interface.variable_manager = agent_instance.variable_manager

        # Collective capability sharing: reuse tool-capability analyses already
        # computed by other built agents for tools this agent also registers,
        # avoiding redundant re-analysis of the same tools.
        agent_tool_nams = set(agent_instance.tool_registry.keys())

        tools_data = {}
        for _agent_name in self.config["agents-name-list"]:
            _instance_key = f'agent-instance-{_agent_name}'
            if _instance_key not in self.config:
                # Ensure the "self" agent is built so its capabilities can be shared.
                if agent_name != "self" and _agent_name == "self":
                    await self.get_agent("self")

            if _instance_key not in self.config:
                continue
            _agent_instance = self.config[_instance_key]
            _agent_tool_nams = set(_agent_instance._tool_capabilities.keys())
            # extract the tool names that are in both agents_registry
            overlap_tool_nams = agent_tool_nams.intersection(_agent_tool_nams)
            _tc = _agent_instance._tool_capabilities
            for tool_name in overlap_tool_nams:
                if tool_name not in _tc:
                    continue
                tools_data[tool_name] = _tc[tool_name]

        agent_instance._tool_capabilities.update(tools_data)
        # Cache the instance and update tracking
        self.config[instance_key] = agent_instance
        if agent_name not in self.agent_data:
            self.agent_data[agent_name] = builder_to_use.config.model_dump()
        if agent_name not in self.config["agents-name-list"]:
            self.config["agents-name-list"].append(agent_name)

        self.print(f"Built and cached FlowAgent instance: {agent_name}")
        return agent_instance

    async def mini_task_completion(self, mini_task: str, user_task: str | None = None, mode: Any = None,  # LLMMode
                                   max_tokens_override: int | None = None, task_from="system",
                                   stream_function: Callable | None = None, message_history: list | None = None, agent_name="TaskCompletion"):
        if mini_task is None: return None
        if agent_name is None: return None
        if mini_task == "test": return "test"
        self.print(f"Running mini task, volume {len(mini_task)}")

        agent = await self.get_agent(agent_name)  # Ensure agent is retrieved (and built if needed)

        effective_system_message = agent.amd.system_message
        if mode and hasattr(mode, 'system_msg') and mode.system_msg:
            effective_system_message = mode.system_msg

        messages = []
        if effective_system_message:
            messages.append({"role": "system", "content": effective_system_message})
        if message_history:
            messages.extend(message_history)

        current_prompt = mini_task
        if user_task:  # If user_task is provided, it becomes the main prompt, mini_task is context
            messages.append({"role": task_from, "content": mini_task})  # mini_task as prior context
            current_prompt = user_task  # user_task as the current prompt

        messages.append({"role": "user", "content": current_prompt})

        # Prepare params for a_run_llm_completion
        llm_params = {"model": agent.amd.fast_llm_model if agent.amd.use_fast_response else agent.amd.complex_llm_model, "messages": messages}
        if max_tokens_override:
            llm_params['max_tokens'] = max_tokens_override
        else:
            llm_params['max_tokens'] = agent.amd.max_tokens

        if stream_function:
            llm_params['stream'] = True
            # FlowAgent a_run_llm_completion handles stream_callback via agent.stream_callback
            # For a one-off, we might need a temporary override or pass it if supported.
            # For now, assume stream_callback is set on agent instance if needed globally.
            # If stream_function is for this call only, agent.a_run_llm_completion needs modification
            # or we use a temporary agent instance. This part is tricky.
            # Let's assume for now that if stream_function is passed, it's a global override for this agent type.
            original_stream_cb = agent.stream_callback
            original_stream_val = agent.stream
            agent.stream_callback = stream_function
            agent.stream = True
            try:
                response_content = await agent.a_run_llm_completion(**llm_params)
            finally:
                agent.stream_callback = original_stream_cb
                agent.stream = original_stream_val  # Reset to builder's config
            return response_content  # Streaming output handled by callback

        llm_params['stream'] = False
        response_content = await agent.a_run_llm_completion(**llm_params)
        return response_content

    async def mini_task_completion_format(self, mini_task, format_schema: type[BaseModel],
                                          max_tokens_override: int | None = None, agent_name="TaskCompletion",
                                          task_from="system", mode_overload: Any = None, user_task: str | None = None, auto_context=False):
        """Run a mini task and coerce the LLM answer into *format_schema*.

        Mirrors mini_task_completion's context handling: with *user_task* set,
        *mini_task* is prior context and *user_task* the prompt. Returns the
        formatted result (or a plain bool for bool schemas); None on error.
        """
        if mini_task is None:
            return None
        self.print(f"Running formatted mini task, volume {len(mini_task)}")

        agent = await self.get_agent(agent_name)

        context_messages = []
        # Optional mode override supplies the system message.
        if mode_overload and hasattr(mode_overload, 'system_msg') and mode_overload.system_msg:
            context_messages.append({"role": "system", "content": mode_overload.system_msg})

        if user_task:
            context_messages.append({"role": task_from, "content": mini_task})
            prompt = user_task
        else:
            prompt = mini_task

        # Delegate structured output to the agent's a_format_class.
        try:
            result = await agent.a_format_class(
                pydantic_model=format_schema,
                prompt=prompt,
                message_context=context_messages,
                auto_context=auto_context
                # max_tokens can be part of agent's model config or passed if a_format_class supports it
            )
            if format_schema == bool:
                # Bool schemas come back as a dict like {"value": True}; unwrap
                # defensively and fall back to False on unexpected shapes.
                return result.get("value", False) if isinstance(result, dict) else False
            return result
        except Exception as e:
            self.print(f"Error in mini_task_completion_format: {e}")
            return None  # Or raise

    async def format_class(self, format_schema: type[BaseModel], task: str, agent_name="TaskCompletion", auto_context=False):
        """Format *task* into the given pydantic schema via the named (or given) agent.

        *agent_name* may be an agent name (str) or an already-built FlowAgent.
        Returns None when the schema or task is missing.
        """
        if format_schema is None or not task:
            return None

        if isinstance(agent_name, FlowAgent):
            agent = agent_name
        elif isinstance(agent_name, str):
            agent = await self.get_agent(agent_name)
        else:
            raise TypeError("agent_name must be str or FlowAgent instance")

        return await agent.a_format_class(format_schema, task, auto_context=auto_context)

    async def run_agent(self, name: str | FlowAgent,
                        text: str,
                        verbose: bool = False,  # Handled by agent's own config mostly
                        session_id: str | None = None,
                        progress_callback: Callable[[Any], None | Awaitable[None]] | None = None,
                        **kwargs):  # Other kwargs for a_run
        """Run an agent (resolved by name, or given as an instance) on *text*.

        Args:
            name: Agent name to look up via get_agent, or a FlowAgent instance.
            text: The query/task text; "" is returned for empty/"test" input.
            verbose: If True, enables verbose mode on the agent instance.
            session_id: Optional session identifier forwarded to a_run.
            progress_callback: Temporary per-call progress callback; the
                agent's previous callback is restored afterwards.

        Returns:
            The agent's response, or an internal-error result for a bad *name* type.
        """
        if text is None: return ""
        if name is None: return ""
        if text == "test": return ""

        agent_instance = None
        if isinstance(name, str):
            agent_instance = await self.get_agent(name)
        elif isinstance(name, FlowAgent):
            agent_instance = name
        else:
            return self.return_result().default_internal_error(
                f"Invalid agent identifier type: {type(name)}")

        self.print(f"Running agent {agent_instance.amd.name} for task: {text[:100]}...")

        # BUG FIX: the previous code restored the old callback only when it was
        # truthy, so an original callback of None leaked the per-call override
        # onto the agent permanently (and nothing was restored on exception).
        # Track the swap explicitly and restore in a finally block.
        callback_swapped = False
        saved_callback = None
        if progress_callback:
            saved_callback = agent_instance.progress_callback
            agent_instance.progress_callback = progress_callback
            callback_swapped = True

        if verbose:
            agent_instance.verbose = True

        try:
            # Call FlowAgent's a_run method
            response = await agent_instance.a_run(
                query=text,
                session_id=session_id,
                user_id=None,
                stream_callback=None

            )
        finally:
            if callback_swapped:
                agent_instance.progress_callback = saved_callback

        return response

    # mass_text_summaries and related methods remain complex and depend on AISemanticMemory
    # and specific summarization strategies. For now, keeping their structure,
    # but calls to self.format_class or self.mini_task_completion will become async.

    async def mas_text_summaries(self, text, min_length=36000, ref=None):
        """Summarize *text* when it exceeds *min_length* characters, caching by content hash.

        Short texts are returned unchanged. *ref* optionally focuses the
        summary on a particular aspect. Failed summarizations fall back to a
        truncated copy of the input.
        """
        if len(text) < min_length:
            return text

        # Content-addressed cache lookup.
        cache_key = self.one_way_hash(text, 'summaries', 'isaa')
        cached = self.mas_text_summaries_dict.get(cache_key)
        if cached is not None:
            return cached

        # Local import to avoid a module-level import cycle.
        from .extras.modes import (
            SummarizationMode,
        )
        focus = ref if ref else 'key details'
        summary = await self.mini_task_completion(
            mini_task=f"Summarize this text, focusing on aspects related to '{focus}'. The text is: {text}",
            mode=self.controller.rget(SummarizationMode))

        # Fallback when summarization produced nothing usable.
        if summary is None or not isinstance(summary, str):
            summary = text[:min_length] + "... (summarization failed)"

        self.mas_text_summaries_dict.set(cache_key, summary)
        return summary

    def get_memory(self, name: str | None = None) -> AISemanticMemory:
        """Return the semantic memory system, or a named knowledge base from it.

        The memory system is constructed lazily: while self.agent_memory is
        still a base-path string, it is replaced with a real AISemanticMemory.
        """
        log = get_logger()  # local name avoids clashing with self.logger
        if isinstance(self.agent_memory, str):  # still the base-path string
            log.info(Style.GREYBG("AISemanticMemory Initialized from path"))
            self.agent_memory = AISemanticMemory(base_path=self.agent_memory)

        memory = self.agent_memory
        if name is None:
            return memory
        # NOTE(review): assumes AISemanticMemory.get is synchronous here; if it
        # becomes async this call needs adjustment. May return a list of
        # KnowledgeBase objects or a single one.
        return memory.get(name)

    async def host_agent_ui(
        self,
        agent,
        host: str = "0.0.0.0",
        port: int | None = None,
        access: str = 'local',
        registry_server: str | None = None,
        public_name: str | None = None,
        description: str | None = None,
        use_builtin_server: bool | None = None
    ) -> dict[str, str]:
        """
        Unified agent hosting with WebSocket-enabled UI and optional registry publishing.

        Args:
            agent: Agent or Chain instance to host
            host: Host address (default: 0.0.0.0 for remote access)
            port: Port number (auto-assigned if None)
            access: 'local', 'remote', or 'registry'
            registry_server: Registry server URL for publishing (e.g., "ws://localhost:8080/ws/registry/connect")
            public_name: Public name for registry publishing
            description: Description for registry publishing
            use_builtin_server: Use toolbox built-in server vs standalone Python
                server; None means "use the app default" (self.app.is_server)

        Returns:
            Dictionary with access URLs and configuration

        Raises:
            ValueError: If registry publishing is requested without a public_name.
        """
        # BUG FIX: the old `use_builtin_server or self.app.is_server` discarded
        # an explicit False argument; only fall back to the app default when the
        # caller did not specify a value.
        if use_builtin_server is None:
            use_builtin_server = self.app.is_server
        if not hasattr(self, '_hosted_agents'):
            self._hosted_agents = {}

        agent_id = f"agent_{secrets.token_urlsafe(8)}"

        # Generate unique port if not specified
        if not port:
            port = 8765 + len(self._hosted_agents)

        # Store agent reference
        self._hosted_agents[agent_id] = {
            'agent': agent,
            'port': port,
            'host': host,
            'access': access,
            'public_name': public_name or f"Agent_{agent_id}",
            'description': description
        }

        result = {
            'agent_id': agent_id,
            'local_url': f"http://{host}:{port}",
            'status': 'starting'
        }

        if use_builtin_server:
            # Use toolbox built-in server
            result.update(await self._setup_builtin_server_hosting(agent_id, agent, host, port))
        else:
            # Use standalone Python server
            result.update(await self._setup_standalone_server_hosting(agent_id, agent, host, port))

        # Handle registry publishing if requested
        if access in ['remote', 'registry'] and registry_server:
            if not public_name:
                raise ValueError("public_name required for registry publishing")

            registry_result = await self._publish_to_registry(
                agent=agent,
                public_name=public_name,
                registry_server=registry_server,
                description=description,
                agent_id=agent_id
            )
            result.update(registry_result)

        self.app.print(f"🚀 Agent '{result.get('public_name', agent_id)}' hosted successfully!")
        self.app.print(f"   Local UI: {result['local_url']}")
        if 'public_url' in result:
            self.app.print(f"   Public URL: {result['public_url']}")
            self.app.print(f"   API Key: {result.get('api_key', 'N/A')}")

        return result

    # toolboxv2/mods/isaa/__init__.py - Missing Methods

    import asyncio
    import json
    import secrets
    import threading
    import time
    from concurrent.futures import ThreadPoolExecutor
    from http.server import BaseHTTPRequestHandler, HTTPServer
    from urllib.parse import parse_qs, urlparse


    async def _handle_reset_context(self, agent_id: str, agent, conn_id: str):
        """Handle context reset requests from WebSocket UI."""
        try:
            # Only agents exposing clear_context() can actually be reset.
            supported = hasattr(agent, 'clear_context')
            if supported:
                agent.clear_context()
                message = "Context reset successfully"
            else:
                message = "Agent does not support context reset"

            # Report the outcome to every connected UI client.
            await self._broadcast_to_agent_ui(agent_id, {
                'event': 'reset_response',
                'data': {
                    'success': supported,
                    'message': message,
                    'timestamp': time.time()
                }
            })

            self.app.print(f"Context reset requested for agent {agent_id}: {message}")

        except Exception as e:
            error_message = f"Context reset failed: {str(e)}"
            self.app.print(f"Context reset error for agent {agent_id}: {e}")

            await self._broadcast_to_agent_ui(agent_id, {
                'event': 'error',
                'data': {
                    'error': error_message,
                    'timestamp': time.time()
                }
            })

    async def _handle_get_status(self, agent_id: str, agent, conn_id: str):
        """Handle status requests from WebSocket UI.

        Collects basic agent metadata, optionally merges agent-provided status
        and hosting info, then broadcasts a 'status_response' event; any
        failure is reported to the UI as an 'error' event instead of raising.
        """
        try:
            # Collect agent status information
            status_info = {
                'agent_id': agent_id,
                'agent_name': getattr(agent, 'name', 'Unknown'),
                'agent_type': agent.__class__.__name__,
                'status': 'active',
                'timestamp': time.time(),
                'server_type': 'builtin'
            }

            # Merge agent-provided status, best-effort only.
            if hasattr(agent, 'status'):
                try:
                    agent_status = agent.status()
                    if isinstance(agent_status, dict):
                        status_info.update(agent_status)
                # BUG FIX: was a bare `except:` which also swallowed SystemExit
                # and KeyboardInterrupt; narrowed to Exception (still best-effort).
                except Exception:
                    pass

            # Add hosted agent info
            if hasattr(self, '_hosted_agents') and agent_id in self._hosted_agents:
                hosted_info = self._hosted_agents[agent_id]
                status_info.update({
                    'host': hosted_info.get('host'),
                    'port': hosted_info.get('port'),
                    'access': hosted_info.get('access'),
                    'public_name': hosted_info.get('public_name')
                })

            # Send status back to UI
            await self._broadcast_to_agent_ui(agent_id, {
                'event': 'status_response',
                'data': status_info
            })

            self.app.print(f"Status requested for agent {agent_id}")

        except Exception as e:
            error_message = f"Status retrieval failed: {str(e)}"
            self.app.print(f"Status error for agent {agent_id}: {e}")

            await self._broadcast_to_agent_ui(agent_id, {
                'event': 'error',
                'data': {
                    'error': error_message,
                    'timestamp': time.time()
                }
            })


    async def stop_hosted_agent(self, agent_id: str | None = None, port: int | None = None):
        """Stop a hosted agent by agent_id or port.

        Args:
            agent_id: Id returned by host_agent_ui; takes precedence over port.
            port: Port of a standalone server to shut down.

        Returns:
            True when an agent/server was stopped, False otherwise.
        """
        if not hasattr(self, '_hosted_agents') and not hasattr(self, '_standalone_servers'):
            self.app.print("No hosted agents found")
            return False

        # Stop by agent_id
        if agent_id:
            if hasattr(self, '_hosted_agents') and agent_id in self._hosted_agents:
                agent_info = self._hosted_agents[agent_id]
                agent_port = agent_info.get('port')

                # Stop standalone server if exists (best-effort).
                if hasattr(self, '_standalone_servers') and agent_port in self._standalone_servers:
                    server_info = self._standalone_servers[agent_port]
                    try:
                        server_info['server'].shutdown()
                        self.app.print(f"Stopped standalone server for agent {agent_id}")
                    # BUG FIX: was a bare `except: pass`, which silently hid all
                    # shutdown failures (including SystemExit/KeyboardInterrupt);
                    # narrowed to Exception and logged.
                    except Exception as e:
                        self.app.print(f"Failed to stop standalone server for agent {agent_id}: {e}")

                # Clean up hosted agent info
                del self._hosted_agents[agent_id]
                self.app.print(f"Stopped hosted agent {agent_id}")
                return True

        # Stop by port
        if port:
            if hasattr(self, '_standalone_servers') and port in self._standalone_servers:
                server_info = self._standalone_servers[port]
                try:
                    server_info['server'].shutdown()
                    self.app.print(f"Stopped server on port {port}")
                    return True
                except Exception as e:
                    self.app.print(f"Failed to stop server on port {port}: {e}")
                    return False

        self.app.print("Agent or port not found")
        return False

    async def list_hosted_agents(self) -> dict[str, Any]:
        """List all currently hosted agents (built-in and standalone servers)."""
        summary = {
            'builtin_agents': {},
            'standalone_agents': {},
            'total_count': 0
        }

        # Agents served through the toolbox built-in server.
        for hosted_id, info in getattr(self, '_hosted_agents', {}).items():
            summary['builtin_agents'][hosted_id] = {
                'public_name': info.get('public_name'),
                'host': info.get('host'),
                'port': info.get('port'),
                'access': info.get('access'),
                'description': info.get('description')
            }

        # Agents served by standalone Python HTTP servers.
        for server_port, info in getattr(self, '_standalone_servers', {}).items():
            summary['standalone_agents'][info['agent_id']] = {
                'port': server_port,
                'thread_alive': info['thread'].is_alive(),
                'server_type': 'standalone'
            }

        summary['total_count'] = len(summary['builtin_agents']) + len(summary['standalone_agents'])
        return summary

    def _create_agent_ws_connect_handler(self, agent_id: str):
        """Create WebSocket connect handler for specific agent."""

        async def on_connect(app, conn_id: str, session: dict):
            if not hasattr(self, '_agent_connections'):
                self._agent_connections = {}

            if agent_id not in self._agent_connections:
                self._agent_connections[agent_id] = set()

            self._agent_connections[agent_id].add(conn_id)

            # Send initial status
            await app.ws_send(conn_id, {
                'event': 'agent_connected',
                'data': {
                    'agent_id': agent_id,
                    'status': 'ready',
                    'capabilities': ['chat', 'progress_tracking', 'real_time_updates']
                }
            })

            self.app.print(f"UI client connected to agent {agent_id}: {conn_id}")

        return on_connect

    def _create_agent_ws_message_handler(self, agent_id: str, agent):
        """Create WebSocket message handler for specific agent."""

        async def on_message(app, conn_id: str, session: dict, payload: dict):
            event = payload.get('event')
            data = payload.get('data', {})

            if event == 'chat_message':
                await self._handle_chat_message(agent_id, agent, conn_id, data)
            elif event == 'reset_context':
                await self._handle_reset_context(agent_id, agent, conn_id)
            elif event == 'get_status':
                await self._handle_get_status(agent_id, agent, conn_id)
            else:
                self.app.print(f"Unknown event from UI: {event}")

        return on_message

    def _create_agent_ws_disconnect_handler(self, agent_id: str):
        """Create WebSocket disconnect handler for specific agent."""

        async def on_disconnect(app, conn_id: str, session: dict = None):
            if hasattr(self, '_agent_connections') and agent_id in self._agent_connections:
                self._agent_connections[agent_id].discard(conn_id)

            self.app.print(f"UI client disconnected from agent {agent_id}: {conn_id}")

        return on_disconnect


    async def _broadcast_to_agent_ui(self, agent_id: str, message: dict):
        """Broadcast message to all UI clients connected to specific agent."""
        if not hasattr(self, '_agent_connections') or agent_id not in self._agent_connections:
            return

        for conn_id in self._agent_connections[agent_id].copy():
            try:
                await self.app.ws_send(conn_id, message)
            except Exception as e:
                self.app.print(f"Failed to send to UI client {conn_id}: {e}")
                self._agent_connections[agent_id].discard(conn_id)

    async def _publish_to_registry(
        self,
        agent,
        public_name: str,
        registry_server: str,
        description: str | None = None,
        agent_id: str | None = None
    ) -> dict[str, str]:
        """Publish agent to registry server.

        Returns a dict with the public URL/API key on success, or a
        {'registry_status': 'failed', ...} dict on any error.
        """
        try:
            # Import the registry client dynamically to avoid circular imports.
            registry_client_module = __import__("toolboxv2.mods.registry.client", fromlist=["get_registry_client"])
            client = registry_client_module.get_registry_client(self.app)

            # Establish the WebSocket connection if not already open.
            if not client.ws or not client.ws.open:
                await client.connect(registry_server)
            if not client.ws or not client.ws.open:
                raise Exception("Failed to connect to registry server")

            # Register the agent with the remote registry.
            reg_info = await client.register(agent, public_name, description)
            if not reg_info:
                raise Exception("Registration failed")

            return {
                'public_url': reg_info.public_url,
                'api_key': reg_info.public_api_key,
                'public_agent_id': reg_info.public_agent_id,
                'registry_status': 'published'
            }

        except Exception as e:
            self.app.print(f"Registry publishing failed: {e}")
            return {'registry_status': 'failed', 'registry_error': str(e)}

    def _get_enhanced_agent_ui_html(self, agent_id: str) -> str:
        """Get production-ready enhanced UI HTML with comprehensive progress visualization."""
        agent_info = self._hosted_agents.get(agent_id, {})
        server_info = {
            'server_type': 'standalone' if not hasattr(self.app, 'tb') else 'builtin',
            'agent_id': agent_id
        }

        # Update the JavaScript section in the HTML template:
        js_config = f"""
                window.SERVER_CONFIG = {json.dumps(server_info)};
            """
        html_template = """<!DOCTYPE html>
    <html lang="en">
    <head>
        <meta charset="UTF-8">
        <meta name="viewport" content="width=device-width, initial-scale=1.0">
        <title>{agent_name}</title>
        <script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
        <style>
            :root {
                --bg-primary: #0d1117;
                --bg-secondary: #161b22;
                --bg-tertiary: #21262d;
                --text-primary: #f0f6fc;
                --text-secondary: #8b949e;
                --text-muted: #6e7681;
                --accent-blue: #58a6ff;
                --accent-green: #3fb950;
                --accent-red: #f85149;
                --accent-orange: #d29922;
                --accent-purple: #a5a5f5;
                --accent-cyan: #39d0d8;
                --border-color: #30363d;
                --shadow: 0 2px 8px rgba(0, 0, 0, 0.3);
            }

            * { margin: 0; padding: 0; box-sizing: border-box; }

            body {
                font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif;
                background: var(--bg-primary);
                color: var(--text-primary);
                height: 100vh;
                display: flex;
                flex-direction: column;
                overflow: hidden;
            }

            .header {
                background: var(--bg-tertiary);
                padding: 12px 20px;
                border-bottom: 1px solid var(--border-color);
                display: flex;
                align-items: center;
                justify-content: space-between;
                box-shadow: var(--shadow);
                z-index: 100;
            }

            .agent-info {
                display: flex;
                align-items: center;
                gap: 16px;
            }

            .agent-title {
                font-size: 18px;
                font-weight: 600;
                color: var(--accent-blue);
            }

            .agent-status {
                display: flex;
                align-items: center;
                gap: 8px;
                font-size: 14px;
            }

            .status-dot {
                width: 10px;
                height: 10px;
                border-radius: 50%;
                background: var(--accent-red);
                animation: pulse 2s infinite;
            }

            .status-dot.connected {
                background: var(--accent-green);
                animation: none;
            }

            .status-dot.processing {
                background: var(--accent-orange);
                animation: pulse 1s infinite;
            }

            @keyframes pulse {
                0%, 100% { opacity: 1; }
                50% { opacity: 0.5; }
            }

            .main-container {
                display: grid;
                grid-template-columns: 2fr 1.5fr 1fr;
                grid-template-rows: 1fr 1fr;
                grid-template-areas:
                    "chat outline activity"
                    "chat system graph";
                flex: 1;
                gap: 1px;
                background: var(--border-color);
                overflow: hidden;
            }

            .panel {
                background: var(--bg-secondary);
                display: flex;
                flex-direction: column;
                overflow: hidden;
            }

            .chat-panel { grid-area: chat; }
            .outline-panel { grid-area: outline; }
            .activity-panel { grid-area: activity; }
            .system-panel { grid-area: system; }
            .graph-panel { grid-area: graph; }

            .panel-header {
                padding: 12px 16px;
                background: var(--bg-tertiary);
                border-bottom: 1px solid var(--border-color);
                font-weight: 600;
                font-size: 12px;
                text-transform: uppercase;
                letter-spacing: 0.5px;
                display: flex;
                align-items: center;
                gap: 8px;
            }

            .panel-content {
                flex: 1;
                overflow-y: auto;
                padding: 12px;
            }

            /* Chat Panel Styles */
            .chat-messages {
                flex: 1;
                overflow-y: auto;
                padding: 16px;
                display: flex;
                flex-direction: column;
                gap: 16px;
            }

            .message {
                display: flex;
                align-items: flex-start;
                gap: 12px;
                max-width: 85%;
            }

            .message.user {
                flex-direction: row-reverse;
                margin-left: auto;
            }

            .message-avatar {
                width: 32px;
                height: 32px;
                border-radius: 50%;
                display: flex;
                align-items: center;
                justify-content: center;
                font-size: 12px;
                font-weight: 600;
                flex-shrink: 0;
            }

            .message.user .message-avatar {
                background: var(--accent-blue);
            }

            .message.agent .message-avatar {
                background: var(--accent-green);
            }

            .message-content {
                padding: 12px 16px;
                border-radius: 12px;
                line-height: 1.5;
                font-size: 14px;
            }

            .message.user .message-content {
                background: var(--accent-blue);
                color: white;
            }

            .message.agent .message-content {
                background: var(--bg-tertiary);
                border: 1px solid var(--border-color);
            }

            .chat-input-area {
                border-top: 1px solid var(--border-color);
                padding: 16px;
                display: flex;
                gap: 12px;
            }

            .chat-input {
                flex: 1;
                background: var(--bg-primary);
                border: 1px solid var(--border-color);
                border-radius: 8px;
                padding: 12px;
                color: var(--text-primary);
                font-size: 14px;
            }

            .chat-input:focus {
                outline: none;
                border-color: var(--accent-blue);
            }

            .send-button {
                background: var(--accent-blue);
                color: white;
                border: none;
                border-radius: 8px;
                padding: 12px 20px;
                cursor: pointer;
                font-weight: 600;
                transition: all 0.2s;
            }

            .send-button:hover:not(:disabled) {
                background: #4493f8;
                transform: translateY(-1px);
            }

            .send-button:disabled {
                opacity: 0.5;
                cursor: not-allowed;
                transform: none;
            }

            /* Progress Indicator */
            .progress-indicator {
                display: none;
                align-items: center;
                gap: 12px;
                padding: 12px 16px;
                background: var(--bg-tertiary);
                border-top: 1px solid var(--border-color);
                font-size: 14px;
            }

            .progress-indicator.active { display: flex; }

            .spinner {
                width: 16px;
                height: 16px;
                border: 2px solid var(--border-color);
                border-top: 2px solid var(--accent-blue);
                border-radius: 50%;
                animation: spin 1s linear infinite;
            }

            @keyframes spin {
                0% { transform: rotate(0deg); }
                100% { transform: rotate(360deg); }
            }

            /* Outline Panel Styles */
            .outline-progress {
                margin-bottom: 16px;
            }

            .outline-header {
                display: flex;
                align-items: center;
                justify-content: space-between;
                margin-bottom: 12px;
            }

            .outline-title {
                font-weight: 600;
                color: var(--accent-cyan);
            }

            .outline-stats {
                font-size: 12px;
                color: var(--text-muted);
            }

            .progress-bar {
                width: 100%;
                height: 6px;
                background: var(--bg-primary);
                border-radius: 3px;
                overflow: hidden;
                margin-bottom: 16px;
            }

            .progress-fill {
                height: 100%;
                background: linear-gradient(90deg, var(--accent-blue), var(--accent-cyan));
                width: 0%;
                transition: width 0.5s ease;
            }

            .outline-steps {
                display: flex;
                flex-direction: column;
                gap: 8px;
            }

            .outline-step {
                display: flex;
                align-items: center;
                gap: 10px;
                padding: 8px 12px;
                border-radius: 6px;
                background: var(--bg-primary);
                border-left: 3px solid var(--border-color);
                transition: all 0.3s;
            }

            .outline-step.active {
                border-left-color: var(--accent-orange);
                background: rgba(217, 153, 34, 0.1);
            }

            .outline-step.completed {
                border-left-color: var(--accent-green);
                background: rgba(63, 185, 80, 0.1);
            }

            .step-icon {
                font-size: 14px;
                width: 16px;
            }

            .step-text {
                flex: 1;
                font-size: 13px;
            }

            .step-method {
                font-size: 11px;
                color: var(--text-muted);
                background: var(--bg-tertiary);
                padding: 2px 6px;
                border-radius: 4px;
            }

            /* Activity Panel Styles */
            .current-activity {
                background: var(--bg-primary);
                border: 1px solid var(--border-color);
                border-radius: 6px;
                padding: 12px;
                margin-bottom: 12px;
            }

            .activity-header {
                display: flex;
                align-items: center;
                gap: 8px;
                margin-bottom: 8px;
            }

            .activity-title {
                font-weight: 600;
                color: var(--accent-orange);
            }

            .activity-duration {
                font-size: 11px;
                color: var(--text-muted);
                background: var(--bg-tertiary);
                padding: 2px 6px;
                border-radius: 4px;
            }

            .activity-description {
                font-size: 13px;
                line-height: 1.4;
                color: var(--text-secondary);
            }

            .meta-tools-list {
                display: flex;
                flex-direction: column;
                gap: 6px;
            }

            .meta-tool {
                display: flex;
                align-items: center;
                gap: 8px;
                padding: 6px 10px;
                background: var(--bg-primary);
                border-radius: 4px;
                font-size: 12px;
            }

            .tool-icon {
                width: 12px;
                text-align: center;
            }

            .tool-name {
                flex: 1;
                color: var(--text-secondary);
            }

            .tool-status {
                font-size: 10px;
                padding: 2px 6px;
                border-radius: 3px;
            }

            .tool-status.running {
                background: var(--accent-orange);
                color: white;
            }

            .tool-status.completed {
                background: var(--accent-green);
                color: white;
            }

            .tool-status.error {
                background: var(--accent-red);
                color: white;
            }

            /* System Panel Styles */
            .system-grid {
                display: grid;
                grid-template-columns: 1fr 2fr;
                gap: 8px 12px;
                font-size: 12px;
            }

            .system-key {
                color: var(--text-muted);
                font-weight: 500;
            }

            .system-value {
                color: var(--text-primary);
                font-family: 'SF Mono', Monaco, monospace;
                word-break: break-word;
            }

            .current-node {
                background: var(--bg-primary);
                padding: 8px 10px;
                border-radius: 6px;
                margin-bottom: 12px;
                border: 1px solid var(--border-color);
            }

            .node-name {
                font-weight: 600;
                color: var(--accent-purple);
                margin-bottom: 4px;
            }

            .node-operation {
                font-size: 11px;
                color: var(--text-muted);
            }

            /* Graph Panel Styles */
            .agent-graph {
                display: flex;
                flex-direction: column;
                align-items: center;
                gap: 8px;
                padding: 8px;
            }

            .graph-node {
                padding: 6px 12px;
                background: var(--bg-primary);
                border: 1px solid var(--border-color);
                border-radius: 6px;
                font-size: 11px;
                text-align: center;
                min-width: 80px;
            }

            .graph-node.active {
                border-color: var(--accent-orange);
                background: rgba(217, 153, 34, 0.1);
            }

            .graph-node.completed {
                border-color: var(--accent-green);
                background: rgba(63, 185, 80, 0.1);
            }

            .graph-arrow {
                color: var(--text-muted);
                font-size: 12px;
            }

            /* Connection Error Styles */
            .connection-error {
                background: var(--accent-red);
                color: white;
                padding: 8px 12px;
                margin: 8px;
                border-radius: 6px;
                font-size: 12px;
                text-align: center;
            }

            .fallback-mode {
                background: var(--accent-orange);
                color: white;
                padding: 8px 12px;
                margin: 8px;
                border-radius: 6px;
                font-size: 12px;
                text-align: center;
            }
        </style>
    </head>
    <body>
        <div class="header">
            <div class="agent-info">
                <div class="agent-title">{agent_name}</div>
                <div class="text-secondary">{agent_description}</div>
            </div>
            <div class="agent-status">
                <div class="status-dot" id="status-dot"></div>
                <span id="status-text">Initializing...</span>
            </div>
        </div>

        <div class="main-container">
            <!-- Chat Panel -->
            <div class="panel chat-panel">
                <div class="panel-header">💬 Conversation</div>
                <div class="chat-messages" id="chat-messages">
                    <div class="message agent">
                        <div class="message-avatar">AI</div>
                        <div class="message-content">Hello! I'm ready to help you. What would you like to know?</div>
                    </div>
                </div>
                <div class="progress-indicator" id="progress-indicator">
                    <div class="spinner"></div>
                    <span id="progress-text">Processing...</span>
                </div>
                <div class="chat-input-area">
                    <input type="text" id="chat-input" class="chat-input" placeholder="Type your message...">
                    <button id="send-button" class="send-button">Send</button>
                </div>
            </div>

            <!-- Outline & Progress Panel -->
            <div class="panel outline-panel">
                <div class="panel-header">📋 Execution Outline</div>
                <div class="panel-content">
                    <div class="outline-progress">
                        <div class="outline-header">
                            <div class="outline-title" id="outline-title">Ready</div>
                            <div class="outline-stats" id="outline-stats">0/0 steps</div>
                        </div>
                        <div class="progress-bar">
                            <div class="progress-fill" id="outline-progress-fill"></div>
                        </div>
                    </div>
                    <div class="outline-steps" id="outline-steps">
                        <div class="outline-step">
                            <div class="step-icon">⏳</div>
                            <div class="step-text">Waiting for query...</div>
                        </div>
                    </div>
                    <div class="current-activity" id="current-activity" style="display: none;">
                        <div class="activity-header">
                            <div class="activity-title" id="activity-title">Current Activity</div>
                            <div class="activity-duration" id="activity-duration">0s</div>
                        </div>
                        <div class="activity-description" id="activity-description"></div>
                    </div>
                </div>
            </div>

            <!-- Activity & Meta-Tools Panel -->
            <div class="panel activity-panel">
                <div class="panel-header">⚙️ Meta-Tool Activity</div>
                <div class="panel-content">
                    <div class="meta-tools-list" id="meta-tools-list">
                        <div style="color: var(--text-muted); font-size: 12px; text-align: center; padding: 20px;">
                            No activity yet
                        </div>
                    </div>
                </div>
            </div>

            <!-- System Status Panel -->
            <div class="panel system-panel">
                <div class="panel-header">🔧 System Status</div>
                <div class="panel-content">
                    <div class="current-node" id="current-node">
                        <div class="node-name" id="node-name">System</div>
                        <div class="node-operation" id="node-operation">Idle</div>
                    </div>
                    <div class="system-grid" id="system-grid">
                        <div class="system-key">Status</div>
                        <div class="system-value">Ready</div>
                        <div class="system-key">Runtime</div>
                        <div class="system-value">0s</div>
                        <div class="system-key">Events</div>
                        <div class="system-value">0</div>
                        <div class="system-key">Errors</div>
                        <div class="system-value">0</div>
                    </div>
                </div>
            </div>

            <!-- Agent Graph Panel -->
            <div class="panel graph-panel">
                <div class="panel-header">🌐 Agent Flow</div>
                <div class="panel-content">
                    <div class="agent-graph" id="agent-graph">
                        <div class="graph-node">LLMReasonerNode</div>
                        <div class="graph-arrow">↓</div>
                        <div class="graph-node">Ready</div>
                    </div>
                </div>
            </div>
        </div>

        <script unSave="true">
            __SERVER_CONFIG__
            class ProductionAgentUI {
                constructor() {
                    this.ws = null;
                    this.isProcessing = false;
                    this.sessionId = 'ui_session_' + Math.random().toString(36).substr(2, 9);
                    this.startTime = null;
                    this.reconnectAttempts = 0;
                    this.maxReconnectAttempts = 10;
                    this.reconnectDelay = 1000;
                    this.useWebSocket = true;
                    this.fallbackMode = false;

                    // Progress tracking
                    this.currentOutline = null;
                    this.currentActivity = null;
                    this.metaTools = new Map();
                    this.systemStatus = {};
                    this.agentGraph = [];
                    this.progressEvents = [];

                    this.elements = {
                        statusDot: document.getElementById('status-dot'),
                        statusText: document.getElementById('status-text'),
                        chatMessages: document.getElementById('chat-messages'),
                        chatInput: document.getElementById('chat-input'),
                        sendButton: document.getElementById('send-button'),
                        progressIndicator: document.getElementById('progress-indicator'),
                        progressText: document.getElementById('progress-text'),

                        // Outline elements
                        outlineTitle: document.getElementById('outline-title'),
                        outlineStats: document.getElementById('outline-stats'),
                        outlineProgressFill: document.getElementById('outline-progress-fill'),
                        outlineSteps: document.getElementById('outline-steps'),
                        currentActivity: document.getElementById('current-activity'),
                        activityTitle: document.getElementById('activity-title'),
                        activityDuration: document.getElementById('activity-duration'),
                        activityDescription: document.getElementById('activity-description'),

                        // Meta-tools elements
                        metaToolsList: document.getElementById('meta-tools-list'),

                        // System elements
                        currentNode: document.getElementById('current-node'),
                        nodeName: document.getElementById('node-name'),
                        nodeOperation: document.getElementById('node-operation'),
                        systemGrid: document.getElementById('system-grid'),

                        // Graph elements
                        agentGraph: document.getElementById('agent-graph')
                    };
                    this.init();
                }


                init() {
                    // One-time startup sequence. Order matters: API paths must be
                    // configured before detectServerMode() issues its first fetch.
                    this.configureAPIPaths();
                    this.setupEventListeners();
                    this.detectServerMode();
                    this.startStatusUpdates();
                }

                configureAPIPaths() {
                    const serverType = window.SERVER_CONFIG?.server_type || 'standalone';

                    if (serverType === 'builtin') {
                        this.apiPaths = {
                            status: '/api/agent_ui/status',
                            run: '/api/agent_ui/run_agent',
                            reset: '/api/agent_ui/reset_context'
                        };
                        this.useWebSocket = true;
                    } else {
                        this.apiPaths = {
                            status: '/api/status',
                            run: '/api/run',
                            reset: '/api/reset'
                        };
                        this.useWebSocket = false;
                        this.enableFallbackMode();
                    }
                }

                setupEventListeners() {
                    this.elements.sendButton.addEventListener('click', () => this.sendMessage());
                    this.elements.chatInput.addEventListener('keypress', (e) => {
                        if (e.key === 'Enter' && !this.isProcessing) {
                            this.sendMessage();
                        }
                    });

                    // Handle page visibility for reconnection
                    document.addEventListener('visibilitychange', () => {
                        if (!document.hidden && (!this.ws || this.ws.readyState === WebSocket.CLOSED)) {
                            this.connectWebSocket();
                        }
                    });
                }

                detectServerMode() {
                    // Use configured paths instead of hardcoded ones
                    fetch(this.apiPaths.status)
                        .then(response => response.json())
                        .then(data => {
                            this.addLogEntry(`Server detected: ${data.server_type || 'standalone'}`, 'info');
                            if (data.server_type === 'builtin' && this.useWebSocket) {
                                this.connectWebSocket();
                            }
                        })
                        .catch(() => {
                            this.addLogEntry('Server detection failed, using fallback mode', 'error');
                            this.enableFallbackMode();
                        });
                }

                connectWebSocket() {
                    if (!this.useWebSocket) return;

                    try {
                        // Construct WebSocket URL more robustly
                        const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:';
                        const wsUrl = `${protocol}//${window.location.host}/ws/agent_ui/connect`;

                        this.addLogEntry(`Attempting WebSocket connection to: ${wsUrl}`);
                        this.ws = new WebSocket(wsUrl);

                        this.ws.onopen = () => {
                            this.reconnectAttempts = 0;
                            this.fallbackMode = false;
                            this.setStatus('connected', 'Connected');
                            this.addLogEntry('WebSocket connected successfully', 'success');
                            this.removeFallbackIndicators();
                        };

                        this.ws.onmessage = (event) => {
                            try {
                                const message = JSON.parse(event.data);
                                this.handleWebSocketMessage(message);
                            } catch (error) {
                                this.addLogEntry(`WebSocket message parse error: ${error.message}`, 'error');
                            }
                        };

                        this.ws.onclose = (event) => {
                            this.setStatus('disconnected', 'Disconnected');
                            this.addLogEntry(`WebSocket disconnected (code: ${event.code})`, 'error');
                            this.scheduleReconnection();
                        };

                        this.ws.onerror = (error) => {
                            this.setStatus('error', 'Connection Error');
                            this.addLogEntry('WebSocket connection error', 'error');
                            this.scheduleReconnection();
                        };

                    } catch (error) {
                        this.addLogEntry(`WebSocket setup error: ${error.message}`, 'error');
                        this.enableFallbackMode();
                    }
                }

                scheduleReconnection() {
                    if (this.reconnectAttempts >= this.maxReconnectAttempts) {
                        this.addLogEntry('Max reconnection attempts reached, enabling fallback mode', 'error');
                        this.enableFallbackMode();
                        return;
                    }

                    this.reconnectAttempts++;
                    const delay = Math.min(this.reconnectDelay * this.reconnectAttempts, 30000);

                    this.setStatus('error', `Reconnecting in ${delay/1000}s (attempt ${this.reconnectAttempts})`);

                    setTimeout(() => {
                        if (!this.ws || this.ws.readyState === WebSocket.CLOSED) {
                            this.connectWebSocket();
                        }
                    }, delay);
                }

                enableFallbackMode() {
                    // Switch this session to HTTP-only operation: further WebSocket
                    // attempts are disabled and a banner tells the user that
                    // real-time updates are limited. Can be triggered from server
                    // detection failure, non-builtin config, or exhausted reconnects.
                    this.fallbackMode = true;
                    this.useWebSocket = false;
                    this.setStatus('disconnected', 'Fallback Mode (API Only)');
                    this.showFallbackIndicator();
                    this.addLogEntry('WebSocket unavailable - using API fallback mode', 'info');
                }

                showFallbackIndicator() {
                    const indicator = document.createElement('div');
                    indicator.className = 'fallback-mode';
                    indicator.textContent = 'Using API fallback mode - limited real-time updates';
                    indicator.id = 'fallback-indicator';
                    document.body.appendChild(indicator);
                }

                removeFallbackIndicators() {
                    const indicator = document.getElementById('fallback-indicator');
                    if (indicator) {
                        indicator.remove();
                    }
                }

                handleWebSocketMessage(message) {
                    try {
                        switch (message.event) {
                            case 'agent_connected':
                                this.addLogEntry('Agent ready for interaction', 'success');
                                this.updateSystemStatus({
                                    status: 'Connected',
                                    capabilities: message.data.capabilities
                                });
                                break;

                            case 'processing_start':
                                this.setProcessing(true);
                                this.startTime = Date.now();
                                this.addLogEntry(`Processing: ${message.data.query}`, 'progress');
                                this.resetProgressTracking();
                                break;

                            case 'progress_update':
                                this.handleProgressUpdate(message.data);
                                break;

                            case 'outline_update':
                                this.handleOutlineUpdate(message.data);
                                break;

                            case 'meta_tool_update':
                                this.handleMetaToolUpdate(message.data);
                                break;

                            case 'activity_update':
                                this.handleActivityUpdate(message.data);
                                break;

                            case 'system_update':
                                this.handleSystemUpdate(message.data);
                                break;

                            case 'graph_update':
                                this.handleGraphUpdate(message.data);
                                break;

                            case 'chat_response':
                                this.addMessage('agent', message.data.response);
                                this.setProcessing(false);
                                this.addLogEntry('Response completed', 'success');
                                this.showFinalSummary(message.data);
                                break;

                            case 'error':
                                this.addMessage('agent', `Error: ${message.data.error}`);
                                this.setProcessing(false);
                                this.addLogEntry(`Error: ${message.data.error}`, 'error');
                                break;

                            default:
                                console.log('Unhandled WebSocket message:', message);
                        }
                    } catch (error) {
                        this.addLogEntry(`Message handling error: ${error.message}`, 'error');
                    }
                }

                handleProgressUpdate(data) {
                    this.progressEvents.push(data);

                    const progressText = `${data.event_type}: ${data.status || 'processing'}`;
                    this.elements.progressText.textContent = progressText;

                    // Update based on event type
                    if (data.event_type === 'reasoning_loop') {
                        this.addLogEntry(`🧠 Reasoning loop #${data.loop_number || '?'}`, 'reasoning');
                        this.updateCurrentActivity({
                            title: 'Reasoning',
                            description: data.current_focus || 'Deep thinking in progress',
                            duration: data.time_in_activity || 0
                        });
                    } else if (data.event_type === 'meta_tool_call') {
                        this.addLogEntry(`⚙️ Meta-tool: ${data.meta_tool_name || 'unknown'}`, 'meta-tool');
                    } else {
                        this.addLogEntry(`Progress - ${progressText}`, 'progress');
                    }

                    // Update system status
                    this.updateSystemStatus({
                        current_node: data.node_name,
                        current_operation: data.event_type,
                        runtime: this.getRuntime(),
                        events: this.progressEvents.length
                    });
                }

                handleOutlineUpdate(data) {
                    this.currentOutline = data;

                    if (data.outline_created && data.steps) {
                        this.elements.outlineTitle.textContent = 'Execution Outline';

                        const completedCount = (data.completed_steps || []).length;
                        const totalCount = data.total_steps || data.steps.length;

                        this.elements.outlineStats.textContent = `${completedCount}/${totalCount} steps`;

                        // Update progress bar
                        const progress = totalCount > 0 ? (completedCount / totalCount) * 100 : 0;
                        this.elements.outlineProgressFill.style.width = `${progress}%`;

                        // Update steps
                        this.updateOutlineSteps(data.steps, data.current_step, data.completed_steps || []);

                        this.addLogEntry(`Outline progress: ${completedCount}/${totalCount} steps completed`, 'outline');
                    }
                }

                updateOutlineSteps(steps, currentStep, completedSteps) {
                    this.elements.outlineSteps.innerHTML = '';

                    steps.forEach((step, index) => {
                        const stepEl = document.createElement('div');
                        stepEl.className = 'outline-step';

                        const stepId = step.id || (index + 1);
                        let icon = '⏳';

                        if (completedSteps.includes(stepId)) {
                            stepEl.classList.add('completed');
                            icon = '✅';
                        } else if (stepId === currentStep) {
                            stepEl.classList.add('active');
                            icon = '🔄';
                        }

                        stepEl.innerHTML = `
                            <div class="step-icon">${icon}</div>
                            <div class="step-text">${step.description || `Step ${stepId}`}</div>
                            <div class="step-method">${step.method || 'unknown'}</div>
                        `;

                        this.elements.outlineSteps.appendChild(stepEl);
                    });
                }

                handleMetaToolUpdate(data) {
                    const toolId = `${data.meta_tool_name}_${Date.now()}`;
                    const toolData = {
                        name: data.meta_tool_name,
                        status: data.status || 'running',
                        timestamp: Date.now(),
                        phase: data.execution_phase,
                        data: data
                    };

                    this.metaTools.set(toolId, toolData);
                    this.updateMetaToolsList();

                    // Add to log with appropriate icon
                    const statusIcon = data.status === 'completed' ? '✅' :
                                     data.status === 'error' ? '❌' : '⚙️';
                    this.addLogEntry(`${statusIcon} ${data.meta_tool_name}: ${data.status || 'running'}`, 'meta-tool');
                }

                updateMetaToolsList() {
                    this.elements.metaToolsList.innerHTML = '';

                    if (this.metaTools.size === 0) {
                        this.elements.metaToolsList.innerHTML = `
                            <div style="color: var(--text-muted); font-size: 12px; text-align: center; padding: 20px;">
                                No meta-tool activity yet
                            </div>
                        `;
                        return;
                    }

                    // Show recent meta-tools (last 8)
                    const recentTools = Array.from(this.metaTools.values())
                        .sort((a, b) => b.timestamp - a.timestamp)
                        .slice(0, 8);

                    recentTools.forEach(tool => {
                        const toolEl = document.createElement('div');
                        toolEl.className = 'meta-tool';

                        const icons = {
                            internal_reasoning: '🧠',
                            delegate_to_llm_tool_node: '🎯',
                            create_and_execute_plan: '📋',
                            manage_internal_task_stack: '📚',
                            advance_outline_step: '➡️',
                            write_to_variables: '💾',
                            read_from_variables: '📖',
                            direct_response: '✨'
                        };

                        const icon = icons[tool.name] || '⚙️';
                        const displayName = tool.name.replace(/_/g, ' ');
                        const age = Math.floor((Date.now() - tool.timestamp) / 1000);

                        toolEl.innerHTML = `
                            <div class="tool-icon">${icon}</div>
                            <div class="tool-name">${displayName} (${age}s ago)</div>
                            <div class="tool-status ${tool.status}">${tool.status}</div>
                        `;

                        this.elements.metaToolsList.appendChild(toolEl);
                    });
                }

                handleActivityUpdate(data) {
                    this.currentActivity = data;
                    this.updateCurrentActivity(data);
                }

                updateCurrentActivity(data) {
                    if (data.primary_activity && data.primary_activity !== 'Unknown') {
                        this.elements.currentActivity.style.display = 'block';
                        this.elements.activityTitle.textContent = data.primary_activity || data.title;

                        const duration = data.time_in_current_activity || data.duration || 0;
                        if (duration > 0) {
                            this.elements.activityDuration.textContent = this.formatDuration(duration);
                        }

                        this.elements.activityDescription.textContent =
                            data.detailed_description || data.description || '';
                    } else {
                        this.elements.currentActivity.style.display = 'none';
                    }
                }

                handleSystemUpdate(data) {
                    this.systemStatus = { ...this.systemStatus, ...data };
                    this.updateSystemStatus(data);
                }

                updateSystemStatus(data) {
                    // Update current node
                    if (data.current_node) {
                        this.elements.nodeName.textContent = data.current_node;
                        this.elements.nodeOperation.textContent = data.current_operation || 'Processing';
                    }

                    // Update system grid
                    const gridData = [
                        ['Status', data.status || this.systemStatus.status || 'Running'],
                        ['Runtime', this.formatDuration(data.runtime || this.getRuntime())],
                        ['Events', data.events || this.progressEvents.length],
                        ['Errors', data.error_count || this.systemStatus.error_count || 0],
                        ['Node', data.current_node || this.systemStatus.current_node || 'Unknown']
                    ];

                    if (data.total_cost !== undefined) {
                        gridData.push(['Cost', `$${data.total_cost.toFixed(4)}`]);
                    }

                    if (data.total_tokens !== undefined) {
                        gridData.push(['Tokens', data.total_tokens.toLocaleString()]);
                    }

                    this.elements.systemGrid.innerHTML = '';
                    gridData.forEach(([key, value]) => {
                        this.elements.systemGrid.innerHTML += `
                            <div class="system-key">${key}</div>
                            <div class="system-value">${value}</div>
                        `;
                    });
                }

                handleGraphUpdate(data) {
                    this.agentGraph = data.nodes || [];
                    this.updateAgentGraph();
                }

                updateAgentGraph() {
                    this.elements.agentGraph.innerHTML = '';

                    if (this.agentGraph.length === 0) {
                        const currentNode = this.systemStatus.current_node || 'LLMReasonerNode';
                        this.elements.agentGraph.innerHTML = `
                            <div class="graph-node active">${currentNode}</div>
                            <div class="graph-arrow">↓</div>
                            <div class="graph-node">Processing</div>
                        `;
                        return;
                    }

                    this.agentGraph.forEach((node, index) => {
                        const nodeEl = document.createElement('div');
                        nodeEl.className = 'graph-node';

                        if (node.active) nodeEl.classList.add('active');
                        if (node.completed) nodeEl.classList.add('completed');

                        nodeEl.textContent = node.name || `Node ${index + 1}`;
                        this.elements.agentGraph.appendChild(nodeEl);

                        if (index < this.agentGraph.length - 1) {
                            const arrow = document.createElement('div');
                            arrow.className = 'graph-arrow';
                            arrow.textContent = '↓';
                            this.elements.agentGraph.appendChild(arrow);
                        }
                    });
                }

                async sendMessage() {
                    const message = this.elements.chatInput.value.trim();
                    if (!message || this.isProcessing) return;

                    this.addMessage('user', message);
                    this.elements.chatInput.value = '';

                    if (this.useWebSocket && this.ws && this.ws.readyState === WebSocket.OPEN) {
                        // Send via WebSocket
                        this.ws.send(JSON.stringify({
                            event: 'chat_message',
                            data: {
                                message: message,
                                session_id: this.sessionId
                            }
                        }));
                    } else {
                        // Fallback to API
                        await this.sendMessageViaAPI(message);
                    }
                }

                async sendMessageViaAPI(message) {
                    this.setProcessing(true);
                    this.startTime = Date.now();
                    this.resetProgressTracking();

                    try {
                        const response = await fetch(this.apiPaths.run, {
                            method: 'POST',
                            headers: {
                                'Content-Type': 'application/json'
                            },
                            body: JSON.stringify({
                                query: message,
                                session_id: this.sessionId,
                                include_progress: true
                            })
                        });

                        const result = await response.json();

                        if (result.success) {
                            this.addMessage('agent', result.result);
                            this.addLogEntry(`Request completed via API`, 'success');

                            // Process progress events if available
                            if (result.progress_events) {
                                this.processAPIProgressEvents(result.progress_events);
                            }

                            // Process enhanced progress if available
                            if (result.enhanced_progress) {
                                this.processEnhancedProgress(result.enhanced_progress);
                            }
                        } else {
                            this.addMessage('agent', `Error: ${result.error}`);
                            this.addLogEntry(`API request failed: ${result.error}`, 'error');
                        }

                    } catch (error) {
                        this.addMessage('agent', `Network error: ${error.message}`);
                        this.addLogEntry(`Network error: ${error.message}`, 'error');
                    } finally {
                        this.setProcessing(false);
                    }
                }

                processAPIProgressEvents(events) {
                    events.forEach(event => {
                        this.handleProgressUpdate(event);
                    });
                }

                processEnhancedProgress(progress) {
                    if (progress.outline) {
                        this.handleOutlineUpdate(progress.outline);
                    }
                    if (progress.activity) {
                        this.handleActivityUpdate(progress.activity);
                    }
                    if (progress.system) {
                        this.handleSystemUpdate(progress.system);
                    }
                    if (progress.graph) {
                        this.handleGraphUpdate(progress.graph);
                    }
                }

                resetProgressTracking() {
                    this.progressEvents = [];
                    this.metaTools.clear();
                    this.updateSystemStatus({ status: 'Processing', events: 0 });
                }

                showFinalSummary(data) {
                    if (data.final_summary) {
                        const summary = data.final_summary;
                        this.addLogEntry(`Final Summary - Outline: ${summary.outline_completed ? 'Complete' : 'Partial'}, Meta-tools: ${summary.total_meta_tools}, Nodes: ${summary.total_nodes}`, 'success');
                    }
                }

                addMessage(sender, content) {
                    const messageEl = document.createElement('div');
                    messageEl.classList.add('message', sender);

                    const avatarEl = document.createElement('div');
                    avatarEl.classList.add('message-avatar');
                    avatarEl.textContent = sender === 'user' ? 'You' : 'AI';

                    const contentEl = document.createElement('div');
                    contentEl.classList.add('message-content');

                    if (sender === 'agent' && window.marked) {
                        try {
                            contentEl.innerHTML = marked.parse(content);
                        } catch (error) {
                            contentEl.textContent = content;
                        }
                    } else {
                        contentEl.textContent = content;
                    }

                    messageEl.appendChild(avatarEl);
                    messageEl.appendChild(contentEl);

                    this.elements.chatMessages.appendChild(messageEl);
                    this.elements.chatMessages.scrollTop = this.elements.chatMessages.scrollHeight;
                }

                addLogEntry(message, type = 'info') {
                    // For debugging - could show in a log panel
                    const timestamp = new Date().toLocaleTimeString();
                    console.log(`[${timestamp}] [${type.toUpperCase()}] ${message}`);
                }

                setStatus(status, text) {
                    this.elements.statusDot.className = `status-dot ${status}`;
                    this.elements.statusText.textContent = text;
                }

                setProcessing(processing) {
                    this.isProcessing = processing;
                    this.elements.sendButton.disabled = processing;
                    this.elements.chatInput.disabled = processing;

                    if (processing) {
                        this.elements.progressIndicator.classList.add('active');
                        this.setStatus('processing', 'Processing');
                    } else {
                        this.elements.progressIndicator.classList.remove('active');
                        this.setStatus(this.ws && this.ws.readyState === WebSocket.OPEN ? 'connected' : 'disconnected',
                                      this.ws && this.ws.readyState === WebSocket.OPEN ? 'Connected' : 'Disconnected');
                        this.startTime = null;
                    }
                }

                formatDuration(seconds) {
                    if (typeof seconds !== 'number') return '0s';
                    if (seconds < 60) return `${seconds.toFixed(1)}s`;
                    if (seconds < 3600) return `${Math.floor(seconds/60)}m${Math.floor(seconds%60)}s`;
                    return `${Math.floor(seconds/3600)}h${Math.floor((seconds%3600)/60)}m`;
                }

                getRuntime() {
                    return this.startTime ? (Date.now() - this.startTime) / 1000 : 0;
                }

                startStatusUpdates() {
                    setInterval(() => {
                        if (this.isProcessing) {
                            this.updateSystemStatus({ runtime: this.getRuntime() });
                        }
                    }, 1000);
                }
            }

            // Initialize the production UI once the environment is ready.
            const bootAgentUI = () => {
                window.agentUI = new ProductionAgentUI();
            };

            if (window.TB) {
                // ToolBox runtime present: defer to its ready hook.
                TB.once(bootAgentUI);
            } else {
                document.addEventListener('DOMContentLoaded', bootAgentUI);
            }
        </script>
    </body>
    </html>"""

        return (html_template.
                replace("{agent_name}", agent_info.get('public_name', 'Agent Interface')).
                replace("{agent_description}", agent_info.get('description', '')).
                replace("__SERVER_CONFIG__", js_config)
                )

    async def _handle_chat_message_with_progress_integration(self, agent_id: str, agent, conn_id: str, data: dict) -> None:
        """Run the agent for a chat message while streaming progress to the UI.

        A ProgressiveTreePrinter aggregates progress events; a temporary
        callback broadcasts progress/outline/meta-tool/activity/system/graph
        updates to every UI client of ``agent_id``. The agent's original
        progress callback is restored afterwards, even on failure.

        Args:
            agent_id: Public identifier used to route UI broadcasts.
            agent: Agent exposing ``a_run`` and optionally a progress-callback
                hook (``set_progress_callback`` or ``progress_callback``).
            conn_id: Requesting connection id; used for the fallback session id.
            data: Payload with ``message`` and an optional ``session_id``.
        """
        query = data.get('message', '')
        session_id = data.get('session_id', f"ui_session_{conn_id}")

        if not query:
            return

        # Create ProgressiveTreePrinter for real-time UI updates
        from toolboxv2.mods.isaa.extras.terminal_progress import (
            ProgressiveTreePrinter,
            VerbosityMode,
        )
        progress_printer = ProgressiveTreePrinter(
            mode=VerbosityMode.STANDARD,
            use_rich=False,
            auto_refresh=False
        )

        # Enhanced progress callback that extracts all UI data
        async def comprehensive_progress_callback(event):
            try:
                # Add event to progress printer for processing
                progress_printer.tree_builder.add_event(event)

                # Get comprehensive summary from the printer
                summary = progress_printer.tree_builder.get_execution_summary()

                # Extract outline information
                outline_info = progress_printer._get_current_outline_info()

                # Extract current activity
                activity_info = progress_printer._get_detailed_current_activity()

                # Extract tool usage
                tool_usage = progress_printer._get_tool_usage_summary()

                # Extracted for parity with the other panels; currently unused
                # by the broadcasts below.
                task_progress = progress_printer._get_task_executor_progress()

                # Normalize optional event fields. BUGFIX: the previous code
                # called getattr(event.metadata, {}), which raises TypeError
                # (the attribute name must be a string); metadata is a dict.
                metadata = getattr(event, 'metadata', None) or {}

                # Status may be an enum (with .value) or a plain string.
                status = getattr(event, 'status', None)
                status_str = getattr(status, 'value', str(status)) if status else 'unknown'

                # Send basic progress update
                await self._broadcast_to_agent_ui(agent_id, {
                    'event': 'progress_update',
                    'data': {
                        'event_type': event.event_type,
                        'status': status_str,
                        'node_name': getattr(event, 'node_name', 'Unknown'),
                        'timestamp': event.timestamp,
                        'loop_number': metadata.get('reasoning_loop', 0),
                        'meta_tool_name': metadata.get('meta_tool_name'),
                        'current_focus': metadata.get('current_focus', ''),
                        'time_in_activity': activity_info.get('time_in_current_activity', 0)
                    }
                })

                # Send outline updates
                if outline_info.get('outline_created'):
                    await self._broadcast_to_agent_ui(agent_id, {
                        'event': 'outline_update',
                        'data': outline_info
                    })

                # Send meta-tool updates
                if metadata.get('meta_tool_name'):
                    await self._broadcast_to_agent_ui(agent_id, {
                        'event': 'meta_tool_update',
                        'data': {
                            'meta_tool_name': metadata['meta_tool_name'],
                            'status': 'completed' if event.success else (
                                'error' if event.success is False else 'running'),
                            'execution_phase': metadata.get('execution_phase', 'unknown'),
                            'reasoning_loop': metadata.get('reasoning_loop', 0),
                            'timestamp': event.timestamp
                        }
                    })

                # Send activity updates
                if activity_info['primary_activity'] != 'Unknown':
                    await self._broadcast_to_agent_ui(agent_id, {
                        'event': 'activity_update',
                        'data': activity_info
                    })

                # Send system updates
                await self._broadcast_to_agent_ui(agent_id, {
                    'event': 'system_update',
                    'data': {
                        'current_node': summary['execution_flow']['current_node'],
                        'current_operation': activity_info.get('primary_activity', 'Processing'),
                        'status': 'Processing',
                        'runtime': summary['timing']['elapsed'],
                        'total_events': summary['performance_metrics']['total_events'],
                        'error_count': summary['performance_metrics']['error_count'],
                        'total_cost': summary['performance_metrics']['total_cost'],
                        'total_tokens': summary['performance_metrics']['total_tokens'],
                        'completed_nodes': summary['session_info']['completed_nodes'],
                        'total_nodes': summary['session_info']['total_nodes'],
                        'tool_usage': {
                            # Sets are not JSON-serializable; convert to lists.
                            'tools_used': list(tool_usage.get('tools_used', set())),
                            'tools_active': list(tool_usage.get('tools_active', set())),
                            'current_tool_operation': tool_usage.get('current_tool_operation')
                        }
                    }
                })

                # Send graph updates
                flow_nodes = []
                for node_name in summary['execution_flow']['flow']:
                    if node_name in progress_printer.tree_builder.nodes:
                        node = progress_printer.tree_builder.nodes[node_name]
                        flow_nodes.append({
                            'name': node_name,
                            'active': node_name in summary['execution_flow']['active_nodes'],
                            'completed': (node.status.value == 'completed') if node.status else False,
                            'status': node.status.value if node.status else 'unknown'
                        })

                if flow_nodes:
                    await self._broadcast_to_agent_ui(agent_id, {
                        'event': 'graph_update',
                        'data': {'nodes': flow_nodes}
                    })

            except Exception as e:
                # Progress reporting must never break agent execution.
                self.app.print(f"Comprehensive progress callback error: {e}")

        # Remember the original callback so it can be restored in `finally`.
        original_callback = getattr(agent, 'progress_callback', None)

        try:
            if hasattr(agent, 'set_progress_callback'):
                agent.set_progress_callback(comprehensive_progress_callback)
            elif hasattr(agent, 'progress_callback'):
                agent.progress_callback = comprehensive_progress_callback

            # Send processing start notification
            await self._broadcast_to_agent_ui(agent_id, {
                'event': 'processing_start',
                'data': {'query': query, 'session_id': session_id}
            })

            # Execute agent
            result = await agent.a_run(query=query, session_id=session_id)

            # Get final summary
            final_summary = progress_printer.tree_builder.get_execution_summary()

            # Extract outline information
            outline_info = progress_printer._get_current_outline_info()

            # Initialize outline_info if empty
            if not outline_info or not outline_info.get('steps'):
                outline_info = {
                    'steps': [],
                    'current_step': 1,
                    'completed_steps': [],
                    'total_steps': 0,
                    'step_descriptions': {},
                    'current_step_progress': "",
                    'outline_raw_data': None,
                    'outline_created': False,
                    'actual_step_completions': []
                }

            # Try to infer outline from execution pattern if not found
            if not outline_info.get('outline_created'):
                outline_info = progress_printer._infer_outline_from_execution_pattern(outline_info)

            # Send final result with summary
            await self._broadcast_to_agent_ui(agent_id, {
                'event': 'chat_response',
                'data': {
                    'response': result,
                    'query': query,
                    'session_id': session_id,
                    'completed_at': asyncio.get_event_loop().time(),
                    'final_summary': {
                        'outline_completed': len(outline_info.get('completed_steps', [])) == outline_info.get(
                            'total_steps', 0),
                        'total_meta_tools': len([e for e in progress_printer.tree_builder.nodes.values()
                                                 for event in e.llm_calls + e.sub_events
                                                 if event.metadata and event.metadata.get('meta_tool_name')]),
                        'total_nodes': final_summary['session_info']['total_nodes'],
                        'execution_time': final_summary['timing']['elapsed'],
                        'total_cost': final_summary['performance_metrics']['total_cost']
                    }
                }
            })

        except Exception as e:
            # Surface the failure to the UI rather than dropping it silently.
            await self._broadcast_to_agent_ui(agent_id, {
                'event': 'error',
                'data': {'error': str(e), 'query': query}
            })
        finally:
            # Restore original callback
            if hasattr(agent, 'set_progress_callback'):
                agent.set_progress_callback(original_callback)
            elif hasattr(agent, 'progress_callback'):
                agent.progress_callback = original_callback

    # Backward-compatible wrapper: existing callers of _handle_chat_message are
    # routed to the progress-integrated handler above.
    async def _handle_chat_message(self, agent_id: str, agent, conn_id: str, data: dict) -> None:
        """Backward-compatible entry point; forwards to the enhanced handler."""
        await self._handle_chat_message_with_progress_integration(
            agent_id=agent_id, agent=agent, conn_id=conn_id, data=data
        )

    # Unified publish and host method
    # toolboxv2/mods/isaa/Tools.py

    async def publish_and_host_agent(
        self,
        agent,
        public_name: str,
        registry_server: str = "ws://localhost:8080/ws/registry/connect",
        description: str | None = None,
        access_level: str = "public"
    ) -> dict[str, Any]:
        """Register an agent with a registry server and return hosting info.

        Connects to *registry_server*, wires up a live progress callback, and
        registers *agent* under *public_name*. Emits debug output along the
        way for troubleshooting.

        Args:
            agent: Agent instance to publish (must expose ``a_run``).
            public_name: Name under which the agent is publicly reachable.
            registry_server: WebSocket URL of the registry connect endpoint.
            description: Optional human-readable description; a default is
                generated from *public_name* when omitted.
            access_level: Echoed into the result; not enforced here.

        Returns:
            On success, a dict with registration ids, keys, and URLs
            (``success=True``); otherwise ``{"error": ..., "success": False}``.
        """
        # Lightweight agents may lack the ``amd`` metadata object; provide a
        # minimal namespace so downstream code can rely on ``agent.amd.name``.
        # (Previously a bare lambda was abused as an attribute container.)
        if hasattr(agent, 'name') and not hasattr(agent, 'amd') and hasattr(agent, 'a_run'):
            from types import SimpleNamespace
            agent.amd = SimpleNamespace(name=agent.name)

        try:
            # Initialize the registry client
            from toolboxv2.mods.registry.client import get_registry_client
            registry_client = get_registry_client(self.app)

            self.app.print(f"Connecting to registry server: {registry_server}")
            await registry_client.connect(registry_server)

            # Set up the progress callback for live UI updates
            callback_success = await self.setup_live_progress_callback(agent, registry_client, f"agent_{agent.amd.name}")
            if not callback_success:
                self.app.print("Warning: Progress callback setup failed")
            else:
                self.app.print("✅ Progress callback setup successful")

            # Register the agent with the registry
            self.app.print(f"Registering agent: {public_name}")
            registration_info = await registry_client.register(
                agent_instance=agent,
                public_name=public_name,
                description=description or f"Agent: {public_name}"
            )

            if not registration_info:
                return {"error": "Registration failed", "success": False}

            self.app.print(f"✅ Agent registration successful: {registration_info.public_agent_id}")

            result = {
                "success": True,
                "agent_name": public_name,
                "public_agent_id": registration_info.public_agent_id,
                "public_api_key": registration_info.public_api_key,
                "public_url": registration_info.public_url,
                "registry_server": registry_server,
                "access_level": access_level,
                # Derive companion endpoints from the registered URLs.
                "ui_url": registration_info.public_url.replace("/api/registry/run", "/api/registry/ui"),
                "websocket_url": registry_server.replace("/connect", "/ui_connect"),
                "status": "registered"
            }

            return result

        except Exception as e:
            self.app.print(f"Failed to publish agent: {e}")
            return {"error": str(e), "success": False}

    # toolboxv2/mods/isaa/Tools.py

    async def setup_live_progress_callback(self, agent, registry_client, agent_id: str = None):
        """Enhanced setup for live progress callback with proper error handling."""

        if not registry_client:
            self.app.print("Warning: No registry client provided for progress updates")
            return False

        if not registry_client.is_connected:
            self.app.print("Warning: Registry client is not connected")
            return False

        progress_tracker = EnhancedProgressTracker()

        # Generate agent ID if not provided
        if not agent_id:
            agent_id = getattr(agent, 'name', f'agent_{id(agent)}')

        async def enhanced_live_progress_callback(event: ProgressEvent):
            """Enhanced progress callback with comprehensive data extraction."""
            try:
                # Validate event
                if not event:
                    self.app.print("Warning: Received null progress event")
                    return

                # Debug output for local development
                event_type = getattr(event, 'event_type', 'unknown')
                status = getattr(event, 'status', 'unknown')
                agent_name = getattr(event, 'agent_name', 'Unknown Agent')

                self.app.print(f"📊 Progress Event: {event_type} | {status} | {agent_name}")

                # Extract comprehensive progress data
                progress_data = progress_tracker.extract_progress_data(event)

                # Prepare enhanced progress message
                ui_progress_data = {
                    "agent_id": agent_id,
                    "event_type": event_type,
                    "status": status.value if hasattr(status, 'value') else str(status),
                    "timestamp": getattr(event, 'timestamp', asyncio.get_event_loop().time()),
                    "agent_name": agent_name,
                    "node_name": getattr(event, 'node_name', 'Unknown'),
                    "session_id": getattr(event, 'session_id', None),

                    # Core event metadata
                    "metadata": {
                        **getattr(event, 'metadata', {}),
                        "event_id": getattr(event, 'event_id', f"evt_{asyncio.get_event_loop().time()}"),
                        "sequence_number": getattr(event, 'sequence_number', 0),
                        "parent_event_id": getattr(event, 'parent_event_id', None)
                    },

                    # Detailed progress data for UI panels
                    "progress_data": progress_data,

                    # UI-specific flags for selective updates
                    "ui_flags": {
                        "should_update_outline": bool(progress_data.get('outline')),
                        "should_update_activity": bool(progress_data.get('activity')),
                        "should_update_meta_tools": bool(progress_data.get('meta_tool')),
                        "should_update_system": bool(progress_data.get('system')),
                        "should_update_graph": bool(progress_data.get('graph')),
                        "is_error": event_type.lower() in ['error', 'exception', 'failed'],
                        "is_completion": event_type.lower() in ['complete', 'finished', 'success'],
                        "requires_user_input": getattr(event, 'requires_user_input', False)
                    },

                    # Performance metrics
                    "performance": {
                        "execution_time": getattr(event, 'execution_time', None),
                        "memory_delta": getattr(event, 'memory_delta', None),
                        "tokens_used": getattr(event, 'tokens_used', None),
                        "api_calls_made": getattr(event, 'api_calls_made', None)
                    }
                }

                # Send live update to registry server
                await registry_client.send_ui_progress(ui_progress_data)

                # Also send agent status update if this is a significant event
                if event_type in ['started', 'completed', 'error', 'paused', 'resumed']:
                    agent_status = 'processing'
                    if event_type == 'completed':
                        agent_status = 'idle'
                    elif event_type == 'error':
                        agent_status = 'error'
                    elif event_type == 'paused':
                        agent_status = 'paused'

                    await registry_client.send_agent_status(
                        agent_id=agent_id,
                        status=agent_status,
                        details={
                            "last_event": event_type,
                            "last_update": ui_progress_data["timestamp"],
                            "current_node": progress_data.get('graph', {}).get('current_node', 'Unknown')
                        }
                    )

                # Log successful progress update
                self.app.print(f"✅ Sent progress update: {event_type} -> Registry Server")

            except Exception as e:
                self.app.print(f"❌ Progress callback error: {e}")
                # Send error notification to UI
                try:
                    await registry_client.send_ui_progress({
                        "agent_id": agent_id,
                        "event_type": "progress_callback_error",
                        "status": "error",
                        "timestamp": asyncio.get_event_loop().time(),
                        "agent_name": getattr(agent, 'name', 'Unknown'),
                        "metadata": {"error": str(e)},
                        "ui_flags": {"is_error": True}
                    })
                except Exception as nested_error:
                    self.app.print(f"Failed to send error notification: {nested_error}")

        # Set up progress callback with enhanced error handling
        callback_set = False

        if hasattr(agent, 'set_progress_callback'):
            try:
                self.app.print(f"🔧 Setting progress callback via set_progress_callback for agent: {agent_id}")
                agent.set_progress_callback(enhanced_live_progress_callback)
                callback_set = True
            except Exception as e:
                self.app.print(f"Failed to set progress callback via set_progress_callback: {e}")

        if not callback_set and hasattr(agent, 'progress_callback'):
            try:
                self.app.print(f"🔧 Setting progress callback via direct assignment for agent: {agent_id}")
                agent.progress_callback = enhanced_live_progress_callback
                callback_set = True
            except Exception as e:
                self.app.print(f"Failed to set progress callback via direct assignment: {e}")

        if not callback_set:
            self.app.print(f"⚠️ Warning: Agent {agent_id} doesn't support progress callbacks")
            return False

        # Send initial agent status
        try:
            await registry_client.send_agent_status(
                agent_id=agent_id,
                status='online',
                details={
                    "progress_callback_enabled": True,
                    "callback_setup_time": asyncio.get_event_loop().time(),
                    "agent_type": type(agent).__name__
                }
            )
            self.app.print(f"✅ Progress callback successfully set up for agent: {agent_id}")
        except Exception as e:
            self.app.print(f"Failed to send initial agent status: {e}")

        return True


    async def _setup_builtin_server_hosting(self, agent_id: str, agent, host, port) -> dict[str, str]:
        """Host an agent on the toolbox built-in server.

        Registers the WebSocket lifecycle handlers and the UI/API endpoints
        for the given agent, then returns the set of access URLs.

        Args:
            agent_id: Unique identifier of the hosted agent.
            agent: Agent instance served by the endpoints.
            host: Host address used when building the returned URLs.
            port: Optional port; left out of the URLs when falsy.

        Returns:
            Dict with 'ui_url', 'ws_url', 'api_url', 'reset_url',
            'status_url', 'server_type' and 'status' keys.
        """

        # WebSocket lifecycle handlers bound to this specific agent.
        @self.app.tb(mod_name="agent_ui", websocket_handler="connect")
        def register_agent_ws_handlers(_):
            return {
                "on_connect": self._create_agent_ws_connect_handler(agent_id),
                "on_message": self._create_agent_ws_message_handler(agent_id, agent),
                "on_disconnect": self._create_agent_ws_disconnect_handler(agent_id),
            }

        # Enhanced HTML UI endpoint.
        @self.app.tb(mod_name="agent_ui", api=True, version="1", api_methods=['GET'])
        async def ui():
            return Result.html(
                self._get_enhanced_agent_ui_html(agent_id), row=True
            )

        # Direct agent execution endpoint.
        @self.app.tb(mod_name="agent_ui", api=True, version="1", request_as_kwarg=True, api_methods=['POST'])
        async def run_agent(request: RequestData):
            return await self._handle_direct_agent_run(agent_id, agent, request)

        # Context reset endpoint.
        @self.app.tb(mod_name="agent_ui", api=True, version="1", request_as_kwarg=True, api_methods=['POST'])
        async def reset_context(request: RequestData):
            return await self._handle_api_reset_context(agent_id, agent, request)

        # Status reporting endpoint.
        @self.app.tb(mod_name="agent_ui", api=True, version="1", request_as_kwarg=True, api_methods=['GET'])
        async def status(request: RequestData):
            return await self._handle_api_get_status(agent_id, agent, request)

        # Authority part ("host" or "host:port") shared by the main URLs.
        authority = f"{host}:{port}" if port else f"{host}"

        urls = {
            'ui_url': f"http://{authority}/api/agent_ui/ui",
            'ws_url': f"ws://{authority}/ws/agent_ui/connect",
            'api_url': f"http://{authority}/api/agent_ui/run_agent",
            # NOTE(review): reset/status deliberately target localhost and the
            # app's own port (args_sto.port), unlike the URLs above — confirm
            # this asymmetry against the built-in server's routing before
            # unifying it.
            'reset_url': f"http://localhost:{self.app.args_sto.port}/api/agent_ui/reset_context",
            'status_url': f"http://localhost:{self.app.args_sto.port}/api/agent_ui/status",
            'server_type': 'builtin',
            'status': 'running'
        }
        return urls

    async def _setup_standalone_server_hosting(self, agent_id: str, agent, host: str, port: int) -> dict[str, str]:
        """Setup agent hosting using standalone Python HTTP server with enhanced UI support.

        The server socket is bound synchronously so bind failures (e.g. the
        port is taken by another process) are reported to the caller instead
        of being swallowed inside the serving thread; only the blocking
        serve loop runs in a daemon thread. The port is registered in
        ``_standalone_servers`` before the thread starts, closing the
        check-then-register race of the previous implementation.

        Args:
            agent_id: Unique identifier of the hosted agent.
            agent: Agent instance handled by the request handler.
            host: Interface address to bind.
            port: TCP port to bind.

        Returns:
            Dict describing the running server (URLs, port, status) or an
            error dict with 'status': 'error'.
        """

        if not hasattr(self, '_standalone_servers'):
            self._standalone_servers = {}

        if port in self._standalone_servers:
            self.app.print(f"Port {port} is already in use by another agent")
            return {'status': 'error', 'error': f'Port {port} already in use'}

        # Metadata shared with the request handler instances.
        server_info = {
            'agent_id': agent_id,
            'server_type': 'standalone',
            'api_paths': {
                'ui': '/ui',
                'status': '/api/status',
                'run': '/api/run',
                'reset': '/api/reset'
            }
        }

        # Handler factory binding this agent and its server info.
        def handler_factory(*args, **kwargs):
            handler = EnhancedAgentRequestHandler(self, agent_id, agent, *args, **kwargs)
            handler.server_info = server_info
            return handler

        # Bind here (main thread) so bind errors propagate to the caller
        # instead of only being printed from inside the daemon thread.
        try:
            httpd = HTTPServer((host, port), handler_factory)
        except OSError as e:
            self.app.print(f"Standalone server failed: {e}")
            return {'status': 'error', 'error': str(e)}

        def run_server():
            # Blocking serve loop; runs until shutdown or a fatal error.
            try:
                self.app.print(f"Enhanced standalone server for agent '{agent_id}' running on http://{host}:{port}")
                self.app.print(f"  UI: http://{host}:{port}/ui")
                self.app.print(f"  API: http://{host}:{port}/api/run")
                self.app.print(f"  Status: http://{host}:{port}/api/status")

                httpd.serve_forever()

            except Exception as e:
                self.app.print(f"Standalone server failed: {e}")
            finally:
                # Deregister the port once the server stops for any reason.
                self._standalone_servers.pop(port, None)

        server_thread = threading.Thread(target=run_server, daemon=True)
        # Register before starting so a concurrent call sees the port as taken.
        self._standalone_servers[port] = {
            'server': httpd,
            'agent_id': agent_id,
            'thread': server_thread,
            'server_info': server_info
        }
        server_thread.start()

        # Give the serve loop a moment to spin up.
        await asyncio.sleep(0.5)

        return {
            'server_type': 'standalone',
            'local_url': f"http://{host}:{port}",
            'ui_url': f"http://{host}:{port}/ui",
            'api_url': f"http://{host}:{port}/api/run",
            'reset_url': f"http://{host}:{port}/api/reset",
            'status_url': f"http://{host}:{port}/api/status",
            'status': 'running',
            'port': port
        }

    async def _handle_direct_agent_run(self, agent_id: str, agent, request_data) -> Result:
        """Handle direct agent API calls with enhanced progress tracking.

        Parses the JSON body ({'query', 'session_id', 'kwargs',
        'include_progress'}), runs the agent, and returns a structured JSON
        result, optionally including the collected progress events.

        Args:
            agent_id: Identifier of the hosted agent.
            agent: Agent instance exposing ``a_run`` (and optionally a
                progress-callback hook).
            request_data: Incoming request; ``.body`` is expected to be a dict.

        Returns:
            ``Result.json`` on success, or an error Result (400/500).
        """

        try:
            # Parse request body
            body = request_data.body if hasattr(request_data, 'body') else {}

            if not isinstance(body, dict):
                return Result.default_user_error("Request body must be JSON object", exec_code=400)

            query = body.get('query', '')
            session_id = body.get('session_id', f'api_{secrets.token_hex(8)}')
            kwargs = body.get('kwargs', {})
            include_progress = body.get('include_progress', True)

            if not query:
                return Result.default_user_error("Missing 'query' field in request body", exec_code=400)

            # Enhanced progress tracking for API
            progress_events = []
            enhanced_progress = {}
            # One tracker for the whole run so it can accumulate state across
            # events (the previous per-event tracker discarded it each time).
            progress_tracker = EnhancedProgressTracker()

            async def enhanced_api_progress_callback(event):
                if include_progress:
                    progress_data = progress_tracker.extract_progress_data(event)

                    progress_events.append({
                        'timestamp': event.timestamp,
                        'event_type': event.event_type,
                        'status': event.status.value if event.status else 'unknown',
                        'agent_name': event.agent_name,
                        'metadata': event.metadata
                    })

                    # Store enhanced progress data
                    enhanced_progress.update(progress_data)

            # Remember the previous callback so it can be restored afterwards.
            original_callback = getattr(agent, 'progress_callback', None)

            try:
                if hasattr(agent, 'set_progress_callback'):
                    agent.set_progress_callback(enhanced_api_progress_callback)
                elif hasattr(agent, 'progress_callback'):
                    agent.progress_callback = enhanced_api_progress_callback

                # Execute agent, measuring elapsed wall time.
                start_time = time.time()
                result = await agent.a_run(query=query, session_id=session_id, **kwargs)

                # Return enhanced structured response
                response_data = {
                    'success': True,
                    'result': result,
                    'session_id': session_id,
                    'agent_id': agent_id,
                    # Elapsed seconds (previously this field held a raw
                    # timestamp, not an execution duration).
                    'execution_time': time.time() - start_time
                }

                if include_progress:
                    response_data.update({
                        'progress_events': progress_events,
                        'enhanced_progress': enhanced_progress,
                        'outline_info': enhanced_progress.get('outline', {}),
                        'system_info': enhanced_progress.get('system', {}),
                        'meta_tools_used': enhanced_progress.get('meta_tools', [])
                    })

                return Result.json(data=response_data)

            except Exception as e:
                self.app.print(f"Agent execution error: {e}")
                return Result.default_internal_error(
                    info=f"Agent execution failed: {str(e)}",
                    exec_code=500
                )
            finally:
                # Restore original callback
                if hasattr(agent, 'set_progress_callback'):
                    agent.set_progress_callback(original_callback)
                elif hasattr(agent, 'progress_callback'):
                    agent.progress_callback = original_callback

        except Exception as e:
            self.app.print(f"Direct agent run error: {e}")
            return Result.default_internal_error(
                info=f"Request processing failed: {str(e)}",
                exec_code=500
            )

    async def _handle_api_reset_context(self, agent_id: str, agent, request_data) -> Result:
        """Handle API context reset requests.

        Prefers the agent's ``clear_context()`` hook, falls back to
        ``reset()``, and reports failure when neither is available.
        """
        try:
            # Prefer the dedicated context-reset hook, then a generic reset.
            if hasattr(agent, 'clear_context'):
                agent.clear_context()
                success, message = True, "Context reset successfully"
            elif hasattr(agent, 'reset'):
                agent.reset()
                success, message = True, "Agent reset successfully"
            else:
                success, message = False, "Agent does not support context reset"

            payload = {
                'success': success,
                'message': message,
                'agent_id': agent_id,
                'timestamp': time.time()
            }
            return Result.json(data=payload)

        except Exception as e:
            return Result.default_internal_error(
                info=f"Context reset failed: {str(e)}",
                exec_code=500
            )

    async def _handle_api_get_status(self, agent_id: str, agent, request_data) -> Result:
        """Handle API status requests.

        Builds a status snapshot from the agent itself, any hosting record
        in ``_hosted_agents``, and the active connection count.
        """
        try:
            # Base status common to every agent.
            status_info = {
                'agent_id': agent_id,
                'agent_name': getattr(agent, 'name', 'Unknown'),
                'agent_type': agent.__class__.__name__,
                'status': 'active',
                'timestamp': time.time(),
                'server_type': 'api'
            }

            # Merge agent-reported status when the agent exposes one.
            if hasattr(agent, 'status'):
                try:
                    agent_status = agent.status()
                    if isinstance(agent_status, dict):
                        status_info['agent_status'] = agent_status
                except Exception:
                    # Best-effort: a broken status() hook must not break the
                    # endpoint. Narrowed from a bare `except:` so that
                    # KeyboardInterrupt/SystemExit still propagate.
                    pass

            # Hosting details, when this agent is hosted by us.
            if hasattr(self, '_hosted_agents') and agent_id in self._hosted_agents:
                hosted_info = self._hosted_agents[agent_id]
                status_info.update({
                    'host': hosted_info.get('host'),
                    'port': hosted_info.get('port'),
                    'access': hosted_info.get('access'),
                    'public_name': hosted_info.get('public_name'),
                    'description': hosted_info.get('description')
                })

            # Count currently-open connections for this agent.
            connection_count = 0
            if hasattr(self, '_agent_connections') and agent_id in self._agent_connections:
                connection_count = len(self._agent_connections[agent_id])

            status_info['active_connections'] = connection_count

            return Result.json(data=status_info)

        except Exception as e:
            return Result.default_internal_error(
                info=f"Status retrieval failed: {str(e)}",
                exec_code=500
            )
cleanup_tools_interfaces() async

Cleanup all ToolsInterface instances.

Source code in toolboxv2/mods/isaa/module.py
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
async def cleanup_tools_interfaces(self):
    """Tear down every registered ToolsInterface instance.

    Each interface is closed via its async context-manager exit; a failure
    on one interface is logged and does not abort the remaining cleanups.
    """
    if not hasattr(self, 'tools_interfaces'):
        return

    async def _close_all():
        # Close each interface independently so one failure can't block others.
        for name, interface in self.tools_interfaces.items():
            if not interface:
                continue
            try:
                await interface.__aexit__(None, None, None)
            except Exception as e:
                self.print(f"Error cleaning up ToolsInterface for {name}: {e}")

    try:
        await _close_all()
        self.tools_interfaces.clear()
        self.print("Cleaned up all ToolsInterface instances")
    except Exception as e:
        self.print(f"Error during ToolsInterface cleanup: {e}")
configure_tools_interface(agent_name, **kwargs) async

Configure the ToolsInterface for a specific agent.

Parameters:

Name Type Description Default
agent_name str

Name of the agent

required
**kwargs

Configuration parameters

{}

Returns:

Type Description
bool

True if successful, False otherwise

Source code in toolboxv2/mods/isaa/module.py
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
async def configure_tools_interface(self, agent_name: str, **kwargs) -> bool:
    """Apply configuration options to an agent's ToolsInterface.

    Recognized kwargs: ``base_directory``, ``current_file`` and
    ``variables`` (a dict merged into the interface's IPython namespace).

    Args:
        agent_name: Name of the agent whose interface is configured.
        **kwargs: Configuration parameters (see above).

    Returns:
        True if successful, False otherwise.
    """
    interface = self.get_tools_interface(agent_name)
    if not interface:
        self.print(f"No ToolsInterface found for agent {agent_name}")
        return False

    try:
        if 'base_directory' in kwargs:
            await interface.set_base_directory(kwargs['base_directory'])

        if 'current_file' in kwargs:
            await interface.set_current_file(kwargs['current_file'])

        if 'variables' in kwargs:
            interface.ipython.user_ns.update(kwargs['variables'])

    except Exception as e:
        self.print(f"Failed to configure ToolsInterface for {agent_name}: {e}")
        return False

    self.print(f"Configured ToolsInterface for agent {agent_name}")
    return True
get_tools_interface(agent_name='self')

Get the ToolsInterface instance for a specific agent.

Parameters:

Name Type Description Default
agent_name str

Name of the agent

'self'

Returns:

Type Description
ToolsInterface | None

ToolsInterface instance or None if not found

Source code in toolboxv2/mods/isaa/module.py
908
909
910
911
912
913
914
915
916
917
918
919
920
921
def get_tools_interface(self, agent_name: str = "self") -> ToolsInterface | None:
    """Look up the ToolsInterface registered for an agent.

    Args:
        agent_name: Name of the agent (defaults to "self").

    Returns:
        The matching ToolsInterface, or None when the registry does not
        exist yet or holds no entry for the agent.
    """
    registry = getattr(self, 'tools_interfaces', None)
    if registry is None:
        return None
    return registry.get(agent_name)
host_agent_ui(agent, host='0.0.0.0', port=None, access='local', registry_server=None, public_name=None, description=None, use_builtin_server=None) async

Unified agent hosting with WebSocket-enabled UI and optional registry publishing.

Parameters:

Name Type Description Default
agent

Agent or Chain instance to host

required
host str

Host address (default: 0.0.0.0 for remote access)

'0.0.0.0'
port int | None

Port number (auto-assigned if None)

None
access str

'local', 'remote', or 'registry'

'local'
registry_server str | None

Registry server URL for publishing (e.g., "ws://localhost:8080/ws/registry/connect")

None
public_name str | None

Public name for registry publishing

None
description str | None

Description for registry publishing

None
use_builtin_server bool

Use toolbox built-in server vs standalone Python server

None

Returns:

Type Description
dict[str, str]

Dictionary with access URLs and configuration

Source code in toolboxv2/mods/isaa/module.py
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
async def host_agent_ui(
    self,
    agent,
    host: str = "0.0.0.0",
    port: int | None = None,
    access: str = 'local',
    registry_server: str | None = None,
    public_name: str | None = None,
    description: str | None = None,
    use_builtin_server: bool | None = None
) -> dict[str, str]:
    """
    Unified agent hosting with WebSocket-enabled UI and optional registry publishing.

    Args:
        agent: Agent or Chain instance to host
        host: Host address (default: 0.0.0.0 for remote access)
        port: Port number (auto-assigned if None)
        access: 'local', 'remote', or 'registry'
        registry_server: Registry server URL for publishing (e.g., "ws://localhost:8080/ws/registry/connect")
        public_name: Public name for registry publishing
        description: Description for registry publishing
        use_builtin_server: Use toolbox built-in server vs standalone Python
            server; None means "use the app default" (self.app.is_server)

    Returns:
        Dictionary with access URLs and configuration

    Raises:
        ValueError: If registry publishing is requested without a public_name.
    """
    # Only fall back to the app default when the caller did not decide:
    # the previous `use_builtin_server or self.app.is_server` silently
    # overrode an explicit False.
    if use_builtin_server is None:
        use_builtin_server = self.app.is_server
    if not hasattr(self, '_hosted_agents'):
        self._hosted_agents = {}

    agent_id = f"agent_{secrets.token_urlsafe(8)}"

    # Generate unique port if not specified
    if not port:
        port = 8765 + len(self._hosted_agents)

    # Store agent reference
    hosted_record = {
        'agent': agent,
        'port': port,
        'host': host,
        'access': access,
        'public_name': public_name or f"Agent_{agent_id}",
        'description': description
    }
    self._hosted_agents[agent_id] = hosted_record

    result = {
        'agent_id': agent_id,
        'local_url': f"http://{host}:{port}",
        'status': 'starting'
    }

    if use_builtin_server:
        # Use toolbox built-in server
        result.update(await self._setup_builtin_server_hosting(agent_id, agent, host, port))
    else:
        # Use standalone Python server
        result.update(await self._setup_standalone_server_hosting(agent_id, agent, host, port))

    # Handle registry publishing if requested
    if access in ['remote', 'registry'] and registry_server:
        if not public_name:
            raise ValueError("public_name required for registry publishing")

        registry_result = await self._publish_to_registry(
            agent=agent,
            public_name=public_name,
            registry_server=registry_server,
            description=description,
            agent_id=agent_id
        )
        result.update(registry_result)

    # Use the stored display name as fallback: the result dict itself never
    # carried a 'public_name' key, so the old lookup always showed agent_id.
    self.app.print(f"🚀 Agent '{result.get('public_name', hosted_record['public_name'])}' hosted successfully!")
    self.app.print(f"   Local UI: {result['local_url']}")
    if 'public_url' in result:
        self.app.print(f"   Public URL: {result['public_url']}")
        self.app.print(f"   API Key: {result.get('api_key', 'N/A')}")

    return result
init_from_augment(augment, agent_name='self') async

Initialize from augmented data using new builder system

Source code in toolboxv2/mods/isaa/module.py
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
async def init_from_augment(self, augment, agent_name: str = 'self'):
    """Initialize from augmented data using the new builder system.

    Loads agent configurations ('Agents'), custom function scripts
    ('customFunctions', JSON string or mapping) and notes tool configs
    ('tools') from the augment mapping.

    Args:
        augment: Mapping of augmentation sections.
        agent_name: Agent name string, or a FlowAgentBuilder whose
            configured name is used.

    Raises:
        ValueError: If agent_name is neither a string nor a builder.
    """
    # Normalize agent_name: accept a plain string or a FlowAgentBuilder.
    if not isinstance(agent_name, str):
        if hasattr(agent_name, 'config'):  # FlowAgentBuilder
            agent_name = agent_name.config.name
        else:
            raise ValueError(f"Invalid agent_name type: {type(agent_name)}")

    sections = augment.keys()

    # Load agent configurations
    if "Agents" in sections:
        self.deserialize_all(augment['Agents'])
        self.print("Agent configurations loaded.")

    # Load custom functions (scripts); accept either a JSON string or a dict
    if "customFunctions" in sections:
        scripts = augment['customFunctions']
        if isinstance(scripts, str):
            scripts = json.loads(scripts)
        if scripts:
            self.scripts.scripts = scripts
            self.print("Custom functions loaded")

    # Tools are now handled by the builder system during agent creation
    if "tools" in sections:
        self.print("Tool configurations noted - will be applied during agent building")
list_hosted_agents() async

List all currently hosted agents.

Source code in toolboxv2/mods/isaa/module.py
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
async def list_hosted_agents(self) -> dict[str, Any]:
    """Summarize every agent currently hosted by this instance.

    Returns:
        Dict with 'builtin_agents', 'standalone_agents' and 'total_count'.
    """
    builtin = {}
    # Agents served through the built-in server.
    for agent_id, info in getattr(self, '_hosted_agents', {}).items():
        builtin[agent_id] = {
            'public_name': info.get('public_name'),
            'host': info.get('host'),
            'port': info.get('port'),
            'access': info.get('access'),
            'description': info.get('description')
        }

    standalone = {}
    # Agents served by standalone HTTP servers, keyed by agent id.
    for port, info in getattr(self, '_standalone_servers', {}).items():
        standalone[info['agent_id']] = {
            'port': port,
            'thread_alive': info['thread'].is_alive(),
            'server_type': 'standalone'
        }

    return {
        'builtin_agents': builtin,
        'standalone_agents': standalone,
        'total_count': len(builtin) + len(standalone)
    }
publish_and_host_agent(agent, public_name, registry_server='ws://localhost:8080/ws/registry/connect', description=None, access_level='public') async

FIXED: With debug output for troubleshooting.

Source code in toolboxv2/mods/isaa/module.py
3212
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
async def publish_and_host_agent(
    self,
    agent,
    public_name: str,
    registry_server: str = "ws://localhost:8080/ws/registry/connect",
    description: str | None = None,
    access_level: str = "public"
) -> dict[str, Any]:
    """Register an agent with a registry server and return its public endpoints.

    Connects to the registry, wires up the live progress callback, registers
    the agent, and returns the resulting public URLs/keys. (Original note,
    translated: fixed, with debug output for troubleshooting.)

    Args:
        agent: Agent instance to publish (must expose ``a_run``).
        public_name: Public display name used for registration.
        registry_server: WebSocket URL of the registry server.
        description: Optional description; defaults to "Agent: <name>".
        access_level: Echoed back in the result (not otherwise used here).

    Returns:
        Dict with registration details on success, or
        ``{"error": ..., "success": False}`` on failure.
    """

    # Ensure a minimal `amd` shim exists for raw agents that expose a_run
    # but carry no agent-metadata object.
    if hasattr(agent, 'name') and not hasattr(agent, 'amd') and hasattr(agent, 'a_run'):
        agent.amd = lambda :None
        agent.amd.name = agent.name

    try:
        # Initialize the registry client and connect.
        from toolboxv2.mods.registry.client import get_registry_client
        registry_client = get_registry_client(self.app)

        self.app.print(f"Connecting to registry server: {registry_server}")
        await registry_client.connect(registry_server)

        # Wire up live progress updates before registration.
        callback_success = await self.setup_live_progress_callback(agent, registry_client, f"agent_{agent.amd.name}")
        if callback_success:
            self.app.print("✅ Progress callback setup successful")
        else:
            self.app.print("Warning: Progress callback setup failed")

        # Register the agent with the registry.
        self.app.print(f"Registering agent: {public_name}")
        registration_info = await registry_client.register(
            agent_instance=agent,
            public_name=public_name,
            description=description or f"Agent: {public_name}"
        )

        if not registration_info:
            return {"error": "Registration failed", "success": False}

        self.app.print(f"✅ Agent registration successful: {registration_info.public_agent_id}")

        return {
            "success": True,
            "agent_name": public_name,
            "public_agent_id": registration_info.public_agent_id,
            "public_api_key": registration_info.public_api_key,
            "public_url": registration_info.public_url,
            "registry_server": registry_server,
            "access_level": access_level,
            "ui_url": registration_info.public_url.replace("/api/registry/run", "/api/registry/ui"),
            "websocket_url": registry_server.replace("/connect", "/ui_connect"),
            "status": "registered"
        }

    except Exception as e:
        self.app.print(f"Failed to publish agent: {e}")
        return {"error": str(e), "success": False}
setup_live_progress_callback(agent, registry_client, agent_id=None) async

Enhanced setup for live progress callback with proper error handling.

Source code in toolboxv2/mods/isaa/module.py
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
async def setup_live_progress_callback(self, agent, registry_client, agent_id: str = None) -> bool:
    """Enhanced setup for live progress callback with proper error handling.

    Builds an ``enhanced_live_progress_callback`` closure that forwards every
    ProgressEvent emitted by *agent* to the registry server via
    ``registry_client.send_ui_progress``, plus an agent-status update
    (``send_agent_status``) for significant lifecycle events. The closure is
    attached either through ``agent.set_progress_callback`` or, failing that,
    by assigning ``agent.progress_callback`` directly.

    Args:
        agent: The agent to instrument. Must support one of the two callback
            hookup mechanisms above, otherwise setup fails.
        registry_client: Client used for the live updates; must be truthy and
            report ``is_connected`` as True.
        agent_id: Identifier used in all outgoing updates. Defaults to the
            agent's ``name`` attribute, or ``agent_<id(agent)>`` as a fallback.

    Returns:
        True if a callback was installed (the initial status send is
        best-effort and does not affect the result); False when the registry
        client is missing/disconnected or the agent exposes no callback hook.
    """

    # Precondition: without a connected registry client there is nowhere to
    # send updates, so bail out early instead of installing a dead callback.
    if not registry_client:
        self.app.print("Warning: No registry client provided for progress updates")
        return False

    if not registry_client.is_connected:
        self.app.print("Warning: Registry client is not connected")
        return False

    progress_tracker = EnhancedProgressTracker()

    # Generate agent ID if not provided
    if not agent_id:
        agent_id = getattr(agent, 'name', f'agent_{id(agent)}')

    async def enhanced_live_progress_callback(event: ProgressEvent):
        """Forward one ProgressEvent to the registry as a rich UI update."""
        try:
            # Validate event
            if not event:
                self.app.print("Warning: Received null progress event")
                return

            # Debug output for local development. Every field is read via
            # getattr with a default so a partially-populated event never
            # raises inside the callback.
            event_type = getattr(event, 'event_type', 'unknown')
            status = getattr(event, 'status', 'unknown')
            agent_name = getattr(event, 'agent_name', 'Unknown Agent')

            self.app.print(f"📊 Progress Event: {event_type} | {status} | {agent_name}")

            # Extract comprehensive progress data
            progress_data = progress_tracker.extract_progress_data(event)

            # Prepare enhanced progress message.
            # NOTE(review): asyncio.get_event_loop().time() is the loop's
            # monotonic clock, not wall-clock time — presumably the UI only
            # needs relative ordering; confirm before comparing timestamps
            # across processes.
            ui_progress_data = {
                "agent_id": agent_id,
                "event_type": event_type,
                # Enum-like statuses are flattened to their .value; anything
                # else is stringified so the payload stays serializable.
                "status": status.value if hasattr(status, 'value') else str(status),
                "timestamp": getattr(event, 'timestamp', asyncio.get_event_loop().time()),
                "agent_name": agent_name,
                "node_name": getattr(event, 'node_name', 'Unknown'),
                "session_id": getattr(event, 'session_id', None),

                # Core event metadata
                "metadata": {
                    **getattr(event, 'metadata', {}),
                    "event_id": getattr(event, 'event_id', f"evt_{asyncio.get_event_loop().time()}"),
                    "sequence_number": getattr(event, 'sequence_number', 0),
                    "parent_event_id": getattr(event, 'parent_event_id', None)
                },

                # Detailed progress data for UI panels
                "progress_data": progress_data,

                # UI-specific flags for selective updates — lets the frontend
                # refresh only the panels whose data actually changed.
                "ui_flags": {
                    "should_update_outline": bool(progress_data.get('outline')),
                    "should_update_activity": bool(progress_data.get('activity')),
                    "should_update_meta_tools": bool(progress_data.get('meta_tool')),
                    "should_update_system": bool(progress_data.get('system')),
                    "should_update_graph": bool(progress_data.get('graph')),
                    "is_error": event_type.lower() in ['error', 'exception', 'failed'],
                    "is_completion": event_type.lower() in ['complete', 'finished', 'success'],
                    "requires_user_input": getattr(event, 'requires_user_input', False)
                },

                # Performance metrics
                "performance": {
                    "execution_time": getattr(event, 'execution_time', None),
                    "memory_delta": getattr(event, 'memory_delta', None),
                    "tokens_used": getattr(event, 'tokens_used', None),
                    "api_calls_made": getattr(event, 'api_calls_made', None)
                }
            }

            # Send live update to registry server
            await registry_client.send_ui_progress(ui_progress_data)

            # Also send agent status update if this is a significant event
            if event_type in ['started', 'completed', 'error', 'paused', 'resumed']:
                # Map lifecycle event types onto coarse agent statuses
                # ('started'/'resumed' fall through to 'processing').
                agent_status = 'processing'
                if event_type == 'completed':
                    agent_status = 'idle'
                elif event_type == 'error':
                    agent_status = 'error'
                elif event_type == 'paused':
                    agent_status = 'paused'

                await registry_client.send_agent_status(
                    agent_id=agent_id,
                    status=agent_status,
                    details={
                        "last_event": event_type,
                        "last_update": ui_progress_data["timestamp"],
                        "current_node": progress_data.get('graph', {}).get('current_node', 'Unknown')
                    }
                )

            # Log successful progress update
            self.app.print(f"✅ Sent progress update: {event_type} -> Registry Server")

        except Exception as e:
            self.app.print(f"❌ Progress callback error: {e}")
            # Send error notification to UI; this too is best-effort — a
            # failure here is only logged so the agent keeps running.
            try:
                await registry_client.send_ui_progress({
                    "agent_id": agent_id,
                    "event_type": "progress_callback_error",
                    "status": "error",
                    "timestamp": asyncio.get_event_loop().time(),
                    "agent_name": getattr(agent, 'name', 'Unknown'),
                    "metadata": {"error": str(e)},
                    "ui_flags": {"is_error": True}
                })
            except Exception as nested_error:
                self.app.print(f"Failed to send error notification: {nested_error}")

    # Set up progress callback with enhanced error handling: prefer the
    # explicit setter API, fall back to direct attribute assignment.
    callback_set = False

    if hasattr(agent, 'set_progress_callback'):
        try:
            self.app.print(f"🔧 Setting progress callback via set_progress_callback for agent: {agent_id}")
            agent.set_progress_callback(enhanced_live_progress_callback)
            callback_set = True
        except Exception as e:
            self.app.print(f"Failed to set progress callback via set_progress_callback: {e}")

    if not callback_set and hasattr(agent, 'progress_callback'):
        try:
            self.app.print(f"🔧 Setting progress callback via direct assignment for agent: {agent_id}")
            agent.progress_callback = enhanced_live_progress_callback
            callback_set = True
        except Exception as e:
            self.app.print(f"Failed to set progress callback via direct assignment: {e}")

    if not callback_set:
        self.app.print(f"⚠️ Warning: Agent {agent_id} doesn't support progress callbacks")
        return False

    # Send initial agent status. Best-effort: a failure here is logged but
    # does not undo the (already successful) callback installation.
    try:
        await registry_client.send_agent_status(
            agent_id=agent_id,
            status='online',
            details={
                "progress_callback_enabled": True,
                "callback_setup_time": asyncio.get_event_loop().time(),
                "agent_type": type(agent).__name__
            }
        )
        self.app.print(f"✅ Progress callback successfully set up for agent: {agent_id}")
    except Exception as e:
        self.app.print(f"Failed to send initial agent status: {e}")

    return True
stop_hosted_agent(agent_id=None, port=None) async

Stop a hosted agent by agent_id or port.

Source code in toolboxv2/mods/isaa/module.py
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
async def stop_hosted_agent(self, agent_id: str = None, port: int = None):
    """Stop a hosted agent by agent_id or port.

    Args:
        agent_id: Key into ``self._hosted_agents``; when given, the agent's
            backing standalone server (if any) is shut down and both registry
            entries are removed.
        port: Key into ``self._standalone_servers``; used when stopping a
            bare standalone server directly.

    Returns:
        True when a matching agent/server was found and stopped,
        False otherwise (including when nothing is hosted at all).
    """

    # Nothing has ever been hosted on this instance.
    if not hasattr(self, '_hosted_agents') and not hasattr(self, '_standalone_servers'):
        self.app.print("No hosted agents found")
        return False

    # Stop by agent_id
    if agent_id:
        if hasattr(self, '_hosted_agents') and agent_id in self._hosted_agents:
            agent_info = self._hosted_agents[agent_id]
            agent_port = agent_info.get('port')

            # Shut down the standalone server backing this agent, if any.
            if hasattr(self, '_standalone_servers') and agent_port in self._standalone_servers:
                server_info = self._standalone_servers[agent_port]
                try:
                    server_info['server'].shutdown()
                    self.app.print(f"Stopped standalone server for agent {agent_id}")
                except Exception as e:
                    # Best-effort shutdown: log instead of silently swallowing
                    # (the original bare `except:` hid all failures).
                    self.app.print(f"Error shutting down server for agent {agent_id}: {e}")
                # Drop the stale entry so the server cannot be "stopped" again.
                del self._standalone_servers[agent_port]

            # Clean up hosted agent info
            del self._hosted_agents[agent_id]
            self.app.print(f"Stopped hosted agent {agent_id}")
            return True

    # Stop by port
    if port:
        if hasattr(self, '_standalone_servers') and port in self._standalone_servers:
            server_info = self._standalone_servers[port]
            try:
                server_info['server'].shutdown()
                self.app.print(f"Stopped server on port {port}")
                return True
            except Exception as e:
                self.app.print(f"Failed to stop server on port {port}: {e}")
                return False
            finally:
                # Remove the registry entry regardless of shutdown outcome —
                # the server object is unusable either way.
                del self._standalone_servers[port]

    self.app.print("Agent or port not found")
    return False

ui

get_agent_ui_html()

Produktionsfertige UI mit Live-Progress-Tracking.

Source code in toolboxv2/mods/isaa/ui.py
 573
 574
 575
 576
 577
 578
 579
 580
 581
 582
 583
 584
 585
 586
 587
 588
 589
 590
 591
 592
 593
 594
 595
 596
 597
 598
 599
 600
 601
 602
 603
 604
 605
 606
 607
 608
 609
 610
 611
 612
 613
 614
 615
 616
 617
 618
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517
2518
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
2779
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
2893
2894
2895
2896
2897
2898
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147
3148
3149
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
3161
3162
3163
3164
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209
3210
3211
3212
3213
3214
3215
3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
3231
3232
3233
3234
3235
3236
3237
3238
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380
3381
3382
3383
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407
3408
3409
3410
3411
3412
3413
3414
3415
3416
3417
3418
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440
3441
3442
3443
3444
3445
3446
3447
3448
3449
3450
3451
3452
3453
3454
3455
3456
3457
3458
3459
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489
3490
3491
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532
3533
3534
3535
3536
3537
3538
3539
3540
3541
3542
3543
3544
3545
3546
3547
3548
3549
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585
3586
3587
3588
3589
3590
3591
3592
3593
3594
3595
3596
3597
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763
3764
3765
3766
3767
3768
3769
3770
3771
3772
3773
3774
3775
3776
3777
3778
3779
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797
3798
3799
3800
3801
3802
3803
3804
3805
3806
3807
3808
3809
3810
3811
3812
3813
3814
3815
3816
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836
3837
3838
3839
3840
3841
3842
3843
3844
3845
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855
3856
3857
3858
3859
3860
3861
3862
3863
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873
3874
3875
3876
3877
3878
3879
3880
3881
3882
3883
3884
3885
3886
3887
3888
3889
3890
3891
3892
3893
3894
3895
3896
3897
3898
3899
3900
3901
3902
3903
3904
3905
3906
3907
3908
3909
3910
3911
def get_agent_ui_html() -> str:
    """Produktionsfertige UI mit Live-Progress-Tracking."""

    return """<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Agent Registry - Live Interface</title>
    <script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
    <style>
        /* Modernes Dark Theme UI */
        :root {
            --bg-primary: #0d1117;
            --bg-secondary: #161b22;
            --bg-tertiary: #21262d;
            --text-primary: #f0f6fc;
            --text-secondary: #8b949e;
            --text-muted: #6e7681;
            --accent-blue: #58a6ff;
            --accent-green: #3fb950;
            --accent-red: #f85149;
            --accent-orange: #d29922;
            --accent-purple: #a5a5f5;
            --accent-cyan: #39d0d8;
            --border-color: #30363d;
            --shadow: 0 2px 8px rgba(0, 0, 0, 0.3);

            --sidebar-width: 300px;
            --progress-width: 660px;
            --sidebar-collapsed: 60px;
            --progress-collapsed: 60px;
        }
        /* Enhanced Progress Panel Styles */
        .progress-section {
            margin-bottom: 16px;
        }

        /* ADD to existing CSS */
        .event-status-badge {
            padding: 2px 6px;
            border-radius: 3px;
            font-size: 10px;
            font-weight: 500;
        }

        .event-status-badge.completed {
            background: var(--accent-green);
            color: white;
        }

        .event-status-badge.running {
            background: var(--accent-orange);
            color: white;
        }

        .event-status-badge.failed, .event-status-badge.error {
            background: var(--accent-red);
            color: white;
        }

        .event-status-badge.starting {
            background: var(--accent-cyan);
            color: white;
        }

        .progress-item.expandable[data-event-id*="tool_call"] {
            border-left-color: var(--accent-orange);
        }

        .progress-item.expandable[data-event-id*="llm_call"] {
            border-left-color: var(--accent-purple);
        }

        .progress-item.expandable[data-event-id*="meta_tool"] {
            border-left-color: var(--accent-cyan);
        }

        .progress-item.expandable[data-event-id*="error"] {
            border-left-color: var(--accent-red);
            background: rgba(248, 81, 73, 0.02);
        }

        .section-title.expandable-section {
            cursor: pointer;
            display: flex;
            justify-content: space-between;
            align-items: center;
            padding: 8px 12px;
            background: var(--bg-primary);
            border: 1px solid var(--border-color);
            border-radius: 6px;
            transition: all 0.2s;
        }

        .section-title.expandable-section:hover {
            background: var(--bg-tertiary);
        }

        .section-toggle {
            transition: transform 0.2s;
            font-size: 12px;
        }

        .section-content {
            max-height: 0;
            overflow: hidden;
            transition: max-height 0.3s ease-out;
            background: var(--bg-primary);
            border-radius: 0 0 6px 6px;
        }

        .section-content.expanded {
            max-height: 900px;
            padding: 12px;
            border: 1px solid var(--border-color);
            border-top: none;
            overflow-y: auto;
        }

        .no-data {
            color: var(--text-muted);
            font-size: 12px;
            text-align: center;
            padding: 12px;
            font-style: italic;
        }

        /* Expandable Progress Items */
        .progress-item.expandable {
            cursor: pointer;
            transition: all 0.2s;
            margin-bottom: 8px;
        }

        .progress-item.expandable:hover {
            transform: translateY(-1px);
            box-shadow: 0 4px 12px rgba(0, 0, 0, 0.15);
        }

        .progress-item.expandable.expanded {
            border-color: var(--accent-blue);
        }

        .progress-item.expandable.latest {
            border-left: 3px solid var(--accent-green);
            background: rgba(63, 185, 80, 0.05);
        }

        .progress-item-header {
            display: flex;
            align-items: center;
            gap: 8px;
            padding: 8px 12px;
        }

        .progress-meta {
            margin-left: auto;
            display: flex;
            align-items: center;
            gap: 8px;
        }

        .expand-indicator {
            transition: transform 0.2s;
            font-size: 12px;
            color: var(--text-muted);
        }

        .progress-item.expanded .expand-indicator {
            transform: rotate(180deg);
        }

        .progress-summary {
            padding: 0 12px 8px 36px;
            font-size: 11px;
            color: var(--text-secondary);
        }

        .progress-item-expanded {
            max-height: 0;
            overflow: hidden;
            transition: max-height 0.3s ease-out;
            background: var(--bg-secondary);
            border-top: 1px solid var(--border-color);
        }

        .progress-item-expanded.active {
            max-height: 400px;
            padding: 12px;
            overflow-y: auto;
        }

        .expanded-section {
            margin-bottom: 12px;
        }

        .expanded-section-title {
            font-size: 12px;
            font-weight: 600;
            color: var(--accent-blue);
            margin-bottom: 6px;
            padding-bottom: 4px;
            border-bottom: 1px solid var(--border-color);
        }

        .event-field {
            display: flex;
            justify-content: space-between;
            align-items: flex-start;
            padding: 4px 0;
            font-size: 11px;
        }

        .event-field-label {
            font-weight: 500;
            color: var(--text-secondary);
            min-width: 80px;
        }

        .event-field-value {
            color: var(--text-primary);
            text-align: right;
            flex: 1;
        }

        .event-field-value.json {
            background: var(--bg-primary);
            border-radius: 4px;
            padding: 6px;
            font-family: monospace;
            font-size: 10px;
            text-align: left;
            white-space: pre-wrap;
            max-height: 100px;
            overflow-y: auto;
        }

        /* ADD to existing CSS */
.thinking-step.outline-step {
    border-color: var(--accent-cyan);
    background: rgba(57, 208, 216, 0.05);
}

.thinking-step.outline-step.completed {
    border-color: var(--accent-green);
    background: rgba(63, 185, 80, 0.05);
}

.thinking-step.outline-step.running {
    border-color: var(--accent-orange);
    background: rgba(210, 153, 34, 0.05);
}

.outline-progress {
    margin: 8px 0;
}

.progress-info {
    display: flex;
    justify-content: space-between;
    align-items: center;
    margin-bottom: 6px;
}

.progress-text {
    font-size: 11px;
    color: var(--text-secondary);
    font-weight: 500;
}

.progress-percentage {
    font-size: 11px;
    color: var(--accent-blue);
    font-weight: 600;
}

.progress-bar-container {
    margin-bottom: 8px;
}

.progress-bar {
    height: 6px;
    background: var(--bg-primary);
    border-radius: 3px;
    overflow: hidden;
    border: 1px solid var(--border-color);
}

.progress-bar-fill {
    height: 100%;
    background: linear-gradient(90deg, var(--accent-cyan), var(--accent-blue));
    transition: width 0.5s ease-out;
    position: relative;
}

.progress-bar-fill::after {
    content: '';
    position: absolute;
    top: 0;
    left: 0;
    right: 0;
    bottom: 0;
    background: linear-gradient(90deg, transparent, rgba(255,255,255,0.2), transparent);
    animation: shimmer 2s infinite;
}

/* Slides the highlight overlay horizontally across the progress fill,
   once per animation cycle (used by .progress-bar-fill::after). */
@keyframes shimmer {
    from { transform: translateX(-100%); }
    to   { transform: translateX(100%); }
}

/* Status lines, context chips and plan summary rendered inside
   thinking-step cards. */
.step-completed {
    color: var(--accent-green);
    font-size: 10px;
    text-align: center;
    font-weight: 500;
}

.step-working {
    color: var(--accent-orange);
    font-size: 10px;
    text-align: center;
    font-style: italic;
}

.context-info {
    display: flex;
    justify-content: center;
    gap: 12px;
    margin-top: 8px;
    padding-top: 8px;
    border-top: 1px solid var(--border-color);
}

.context-item {
    font-size: 10px;
    color: var(--text-muted);
    background: var(--bg-primary);
    padding: 2px 6px;
    border-radius: 3px;
    border: 1px solid var(--border-color);
}

.thinking-step.plan-created {
    border-color: var(--accent-blue);
    background: rgba(88, 166, 255, 0.05);
}

.plan-details {
    text-align: center;
}

.plan-info {
    display: flex;
    justify-content: center;
    gap: 12px;
    margin-bottom: 8px;
}

.plan-item {
    font-size: 11px;
    color: var(--text-secondary);
    background: var(--bg-primary);
    padding: 4px 8px;
    border-radius: 4px;
    border: 1px solid var(--border-color);
}

.plan-ready, .outline-ready {
    margin-top: 8px;
    color: var(--accent-green);
    font-size: 10px;
    text-align: center;
}

/* NOTE(review): .step-status is also styled later as part of the
   ".step-progress, .step-info, .step-status" group at equal specificity;
   that later group's values win where they clash. The .completed/.running/
   .ready variants below are more specific and still apply. */
.step-status {
    padding: 2px 6px;
    border-radius: 3px;
    font-size: 10px;
    font-weight: 500;
    margin-left: auto;
}

.step-status.completed {
    background: var(--accent-green);
    color: white;
}

.step-status.running {
    background: var(--accent-orange);
    color: white;
}

.step-status.ready {
    background: var(--accent-blue);
    color: white;
}

        /* Enhanced Chat Integration Styles: thinking-step cards embedded in
           the chat stream, priority badges, and metric grids.
           NOTE(review): .thinking-step is defined again near the bottom of
           this sheet ("NEW: Thinking step styles"); that later copy wins at
           equal specificity for the properties it sets. */
        .thinking-step {
            background: var(--bg-secondary);
            border: 1px solid var(--border-color);
            border-radius: 8px;
            padding: 10px 12px;
            margin: 8px 0;
            font-size: 13px;
            transition: all 0.2s;
        }

        .thinking-step:hover {
            transform: translateY(-1px);
            box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1);
        }

        .thinking-step.reasoning-loop {
            border-color: var(--accent-purple);
            background: rgba(165, 165, 245, 0.05);
        }

        .thinking-step.outline-created {
            border-color: var(--accent-cyan);
            background: rgba(57, 208, 216, 0.05);
        }

        /* Per-state coloring for task-progress cards. */
        .thinking-step.task-progress.starting {
            border-color: var(--accent-orange);
            background: rgba(210, 153, 34, 0.05);
        }

        .thinking-step.task-progress.completed {
            border-color: var(--accent-green);
            background: rgba(63, 185, 80, 0.05);
        }

        .thinking-step.task-progress.error {
            border-color: var(--accent-red);
            background: rgba(248, 81, 73, 0.05);
        }

        .thinking-step-header {
            display: flex;
            align-items: center;
            gap: 8px;
            font-weight: 600;
            margin-bottom: 8px;
            color: var(--text-primary);
        }

        /* Right-aligned meta chips in a card header (overrides the earlier
           standalone .step-status rule where properties clash). */
        .step-progress, .step-info, .step-status {
            margin-left: auto;
            font-size: 10px;
            font-weight: normal;
            color: var(--text-muted);
            background: var(--bg-primary);
            padding: 2px 6px;
            border-radius: 3px;
        }

        .priority-badge {
            padding: 2px 6px;
            border-radius: 3px;
            font-size: 10px;
            font-weight: 500;
        }

        .priority-badge.high {
            background: var(--accent-red);
            color: white;
        }

        .priority-badge.normal {
            background: var(--accent-blue);
            color: white;
        }

        .priority-badge.low {
            background: var(--text-muted);
            color: white;
        }

        /* Performance Metrics Grid */
        .metrics-grid {
            display: grid;
            grid-template-columns: repeat(auto-fit, minmax(100px, 1fr));
            gap: 8px;
        }

        .metric-card {
            background: var(--bg-primary);
            border: 1px solid var(--border-color);
            border-radius: 6px;
            padding: 8px;
            text-align: center;
        }

        /* NOTE(review): .metric-label and .metric-value are each defined
           twice in this section; the second copies (below) win for the
           properties they repeat (color, font-weight), while font-size from
           these first copies persists. Consider merging. */
        .metric-label {
            font-size: 10px;
            color: var(--text-muted);
            margin-bottom: 4px;
        }

        .metric-value {
            font-size: 14px;
            font-weight: 600;
            color: var(--accent-blue);
        }

        .reasoning-metrics .metric-grid {
            display: grid;
            grid-template-columns: repeat(3, 1fr);
            gap: 8px;
            margin-bottom: 8px;
        }

        .metric-item {
            display: flex;
            justify-content: space-between;
            align-items: center;
            font-size: 11px;
        }

        .metric-label {
            color: var(--text-muted);
        }

        .metric-value {
            color: var(--text-primary);
            font-weight: 500;
        }

        /* Progress Bar
           NOTE(review): second definition of the .progress-bar-* trio; being
           later at equal specificity, these flat 4px styles override the
           earlier 6px gradient version — verify this override is intended. */
        .progress-bar-container {
            margin: 8px 0;
        }

        .progress-bar-info {
            display: flex;
            justify-content: space-between;
            font-size: 10px;
            color: var(--text-muted);
            margin-bottom: 4px;
        }

        .progress-bar {
            height: 4px;
            background: var(--bg-primary);
            border-radius: 2px;
            overflow: hidden;
        }

        .progress-bar-fill {
            height: 100%;
            background: var(--accent-blue);
            transition: width 0.5s ease-out;
        }

        /* Outline Display: numbered plan steps, context usage metrics, and
           per-task status text inside thinking-step cards. */
        .outline-steps {
            margin: 8px 0;
        }

        .outline-step {
            display: flex;
            align-items: flex-start;
            gap: 8px;
            margin-bottom: 4px;
            font-size: 11px;
        }

        .step-number {
            color: var(--accent-blue);
            font-weight: 600;
            min-width: 20px;
        }

        .step-text {
            color: var(--text-primary);
            line-height: 1.3;
        }

        .context-metrics {
            display: grid;
            grid-template-columns: repeat(3, 1fr);
            gap: 8px;
            margin-bottom: 8px;
        }

        .context-metric {
            display: flex;
            flex-direction: column;
            align-items: center;
            font-size: 10px;
            padding: 6px;
            background: var(--bg-primary);
            border-radius: 4px;
        }

        .context-label {
            color: var(--text-muted);
            margin-bottom: 2px;
        }

        .context-value {
            color: var(--text-primary);
            font-weight: 600;
        }

        .task-description {
            margin-bottom: 6px;
            font-weight: 500;
        }

        .task-timing {
            font-size: 10px;
            color: var(--accent-green);
        }

        .task-error {
            font-size: 10px;
            color: var(--accent-red);
            background: rgba(248, 81, 73, 0.1);
            padding: 4px;
            border-radius: 3px;
            margin-top: 4px;
        }

        .reasoning-insight {
            margin-top: 8px;
            font-size: 11px;
            color: var(--accent-purple);
            text-align: center;
            font-style: italic;
        }

        .idle-status {
            border-color: var(--accent-green);
            background: rgba(63, 185, 80, 0.02);
        }

        /* Narrow-desktop breakpoints: shrink the side panels via the CSS
           custom properties consumed by .main-container's grid columns. */
        @media (max-width: 1200px) {
            :root {
                --sidebar-width: 250px;
                --progress-width: 580px;
            }
        }

        @media (max-width: 1024px) {
            :root {
                --sidebar-width: 220px;
                --progress-width: 460px;
            }
        }

        /* Icon placeholders shown when a panel is collapsed to a thin strip. */
        .sidebar.collapsed::before {
            content: '📋';
            font-size: 20px;
            display: flex;
            align-items: center;
            justify-content: center;
            padding: 20px 0;
            border-bottom: 1px solid var(--border-color);
        }

        .progress-panel.collapsed::before {
            content: '📊';
            font-size: 20px;
            display: flex;
            align-items: center;
            justify-content: center;
            padding: 20px 0;
            border-bottom: 1px solid var(--border-color);
            writing-mode: vertical-lr;
        }

        /* Animate panel collapse/expand in sync with the grid-column change. */
        .sidebar, .progress-panel {
            transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);
        }

        .main-container {
            transition: grid-template-columns 0.3s cubic-bezier(0.4, 0, 0.2, 1);
        }

        /* Global reset and root layout: full-viewport column flex; panels
           scroll internally, so the page itself never scrolls. */
        * { margin: 0; padding: 0; box-sizing: border-box; }

        body {
            font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif;
            background: var(--bg-primary);
            color: var(--text-primary);
            height: 100vh;
            display: flex;
            flex-direction: column;
            overflow: hidden;
        }

        html, body {
            height: 100%;
            overflow: hidden;
        }

        /* Full-screen blocking overlay prompting the user for an API key. */
        .api-key-modal {
            position: fixed;
            top: 0;
            left: 0;
            right: 0;
            bottom: 0;
            background: rgba(0, 0, 0, 0.8);
            display: flex;
            align-items: center;
            justify-content: center;
            z-index: 1000;
        }

        .api-key-content {
            background: var(--bg-secondary);
            border: 1px solid var(--border-color);
            border-radius: 12px;
            padding: 24px;
            max-width: 500px;
            width: 90%;
            text-align: center;
        }

        .api-key-title {
            font-size: 20px;
            font-weight: 600;
            color: var(--accent-blue);
            margin-bottom: 16px;
        }

        .api-key-description {
            color: var(--text-secondary);
            margin-bottom: 20px;
            line-height: 1.5;
        }

        .api-key-input {
            width: 100%;
            background: var(--bg-primary);
            border: 1px solid var(--border-color);
            border-radius: 8px;
            padding: 12px;
            color: var(--text-primary);
            font-size: 14px;
            margin-bottom: 16px;
        }

        .api-key-button {
            background: var(--accent-blue);
            color: white;
            border: none;
            border-radius: 8px;
            padding: 12px 24px;
            cursor: pointer;
            font-weight: 600;
        }

        /* Updated Header: top bar with logo, panel-toggle buttons and the
           websocket connection indicator. */
        .header {
            background: var(--bg-tertiary);
            padding: 16px 24px;
            border-bottom: 1px solid var(--border-color);
            display: flex;
            align-items: center;
            justify-content: space-between;
            box-shadow: var(--shadow);
            flex-shrink: 0;
        }

        .header-controls {
            display: flex;
            align-items: center;
            gap: 12px;
        }

        .panel-toggle {
            background: var(--bg-secondary);
            border: 1px solid var(--border-color);
            color: var(--text-primary);
            padding: 8px 12px;
            border-radius: 6px;
            cursor: pointer;
            font-size: 12px;
            transition: all 0.2s;
        }

        .panel-toggle:hover {
            background: var(--bg-primary);
        }

        .panel-toggle.active {
            background: var(--accent-blue);
            color: white;
        }

        .logo {
            display: flex;
            align-items: center;
            gap: 12px;
            font-size: 20px;
            font-weight: 700;
            color: var(--accent-blue);
        }

        .connection-status {
            display: flex;
            align-items: center;
            gap: 12px;
        }

        .status-indicator {
            display: flex;
            align-items: center;
            gap: 8px;
            padding: 8px 12px;
            border-radius: 6px;
            font-size: 14px;
            font-weight: 500;
        }

        .status-indicator.connected {
            background: rgba(63, 185, 80, 0.1);
            color: var(--accent-green);
            border: 1px solid var(--accent-green);
        }

        .status-indicator.disconnected {
            background: rgba(248, 81, 73, 0.1);
            color: var(--accent-red);
            border: 1px solid var(--accent-red);
        }

        /* Dot pulses while disconnected; animation is disabled once the
           .connected class is applied. */
        .status-dot {
            width: 8px;
            height: 8px;
            border-radius: 50%;
            background: currentColor;
            animation: pulse 2s infinite;
        }

        .status-dot.connected { animation: none; }

        /* Fades the status dot in and out while the connection is pending
           (applied by .status-dot, cancelled by .status-dot.connected). */
        @keyframes pulse {
            from, to { opacity: 1; }
            50%      { opacity: 0.4; }
        }

        /* FIXED: Better grid layout that properly handles collapsing.
           Three-column grid (sidebar | chat | progress); collapse states swap
           a column's width variable for its *-collapsed counterpart. */
        .main-container {
            display: grid;
            grid-template-areas: "sidebar chat progress";
            grid-template-columns: var(--sidebar-width) 1fr var(--progress-width);
            flex: 1;
            overflow: hidden;
            min-height: 0;
            height: 100%;
        }

        .main-container.sidebar-collapsed {
            grid-template-columns: var(--sidebar-collapsed) 1fr var(--progress-width);
        }

        .main-container.progress-collapsed {
            grid-template-columns: var(--sidebar-width) 1fr var(--progress-collapsed);
        }

        .main-container.both-collapsed {
            grid-template-columns: var(--sidebar-collapsed) 1fr var(--progress-collapsed);
        }

        .sidebar {
            grid-area: sidebar;
            background: var(--bg-secondary);
            border-right: 1px solid var(--border-color);
            display: flex;
            flex-direction: column;
            overflow: hidden;
            height: 100%;
        }

        /* Collapsed sidebar hides its content and shows only the icon strip. */
        .sidebar.collapsed .agents-list,
        .sidebar.collapsed .system-info {
            display: none;
        }

        .sidebar.collapsed .sidebar-header {
            padding: 12px 8px;
            justify-content: center;
        }

        .sidebar.collapsed .sidebar-title {
            display: none;
        }

        /* Rotate the collapse buttons to fit the thin collapsed strips. */
        .sidebar.collapsed .collapse-btn {
            writing-mode: vertical-lr;
            text-orientation: mixed;
        }

        .progress-panel.collapsed .collapse-btn {
            writing-mode: vertical-lr;
            text-orientation: mixed;
            transform: rotate(180deg);
        }

        .sidebar-header {
            padding: 12px 16px;
            background: var(--bg-tertiary);
            border-bottom: 1px solid var(--border-color);
            display: flex;
            align-items: center;
            justify-content: space-between;
            min-height: 48px;
        }

        .sidebar-title {
            font-size: 14px;
            font-weight: 600;
            color: var(--text-secondary);
            text-transform: uppercase;
        }

        .collapse-btn {
            background: none;
            border: none;
            color: var(--text-muted);
            cursor: pointer;
            padding: 4px;
            border-radius: 4px;
            transition: all 0.2s;
        }

        .collapse-btn:hover {
            background: var(--bg-primary);
            color: var(--text-primary);
        }

        /* FIXED: Chat area properly uses grid area and expands */
        .chat-area {
            grid-area: chat;
            display: flex;
            flex-direction: column;
            background: var(--bg-primary);
            min-height: 0;
            height: 100%;
            overflow: hidden;
        }

        /* Updated Progress Panel */
        .progress-panel {
            grid-area: progress;
            background: var(--bg-secondary);
            border-left: 1px solid var(--border-color);
            display: flex;
            flex-direction: column;
            overflow: hidden;
            height: 100%;
        }

        .progress-panel.collapsed .panel-content {
            display: none;
        }

        /* Collapsed progress panel renders its header text vertically. */
        .progress-panel.collapsed .progress-header {
            padding: 12px 8px;
            justify-content: center;
            writing-mode: vertical-lr;
            text-orientation: mixed;
        }

        .progress-panel.collapsed .progress-header span {
            transform: rotate(180deg);
        }

        .progress-header {
            padding: 12px 16px;
            background: var(--bg-tertiary);
            border-bottom: 1px solid var(--border-color);
            display: flex;
            align-items: center;
            justify-content: space-between;
            font-weight: 600;
            font-size: 14px;
            min-height: 48px;
        }

        /* LLM-call progress entries, expanded event sections, and mobile-tab
           default state. */
        .progress-item.llm_call {
            border-left: 3px solid var(--accent-purple);
        }

        .progress-item.llm_call.latest {
            border-left: 3px solid var(--accent-purple);
            background: rgba(165, 165, 245, 0.03);
        }

        .progress-item.llm_call .progress-icon {
            color: var(--accent-purple);
        }

        .progress-summary {
            padding: 0 12px 8px 36px;
            font-size: 10px;
            color: var(--text-secondary);
            line-height: 1.3;
        }

        /* NOTE(review): second of three .event-field-value.json definitions;
           a later copy overrides most of these values, but the border and
           word-break declared only here still apply. Consider merging the
           three definitions into one. */
        .event-field-value.json {
            background: var(--bg-primary);
            border-radius: 4px;
            padding: 8px;
            font-family: 'Consolas', 'Monaco', monospace;
            font-size: 10px;
            text-align: left;
            white-space: pre-wrap;
            max-height: 300px;
            overflow-y: auto;
            border: 1px solid var(--border-color);
            word-break: break-all;
        }

        .expanded-section {
            margin-bottom: 12px;
            border-bottom: 1px solid rgba(48, 54, 61, 0.3);
            padding-bottom: 8px;
        }

        .expanded-section:last-child {
            border-bottom: none;
            margin-bottom: 0;
        }

        /* FIXED: Hide mobile tabs on desktop by default */
        .mobile-tabs {
            display: none;
        }

        /* Mobile Responsive: on small screens the grid becomes a single
           column with tab navigation; all three panels start hidden —
           presumably JS toggles one visible per active .mobile-tab (verify
           against the tab-switching script). */
        @media (max-width: 768px) {
            .main-container {
                display: flex !important;
                flex-direction: column;
                height: 100%;
                grid-template-areas: none;
                grid-template-columns: none;
            }

            .mobile-tabs {
                display: flex;
                background: var(--bg-tertiary);
                border-bottom: 1px solid var(--border-color);
                flex-shrink: 0;
            }

            .header-controls {
                display: none;
            }

            .mobile-tab {
                flex: 1;
                padding: 12px;
                text-align: center;
                background: var(--bg-secondary);
                border-right: 1px solid var(--border-color);
                cursor: pointer;
                transition: all 0.2s;
                font-size: 14px;
            }

            .mobile-tab:last-child {
                border-right: none;
            }

            .mobile-tab.active {
                background: var(--accent-blue);
                color: white;
            }

            .sidebar,
            .progress-panel {
                flex: 1;
                border-right: none;
                border-left: none;
                border-bottom: 1px solid var(--border-color);
                min-height: 0;
                max-height: none;
            }

            .chat-area {
                flex: 1;
                min-height: 0;
            }

            /* All panels hidden by default on mobile (last rule wins). */
            .sidebar,
            .chat-area,
            .progress-panel {
                display: none;
            }
        }

        /* Desktop: force the grid layout and make all panels visible again. */
        @media (min-width: 769px) {
            .main-container {
                display: grid !important;
            }

            .sidebar,
            .chat-area,
            .progress-panel {
                display: flex !important;
                height: 100%;
            }
        }

        /* Sidebar agent list: scrollable cards with hover/active states. */
        .agents-list {
            flex: 1;
            overflow-y: auto;
            padding: 16px;
            min-height: 0;
        }

        .agents-header {
            font-size: 14px;
            font-weight: 600;
            color: var(--text-secondary);
            margin-bottom: 12px;
            text-transform: uppercase;
            letter-spacing: 0.5px;
        }

        .agent-item {
            padding: 12px;
            margin-bottom: 8px;
            background: var(--bg-tertiary);
            border: 1px solid var(--border-color);
            border-radius: 8px;
            cursor: pointer;
            transition: all 0.2s;
        }

        .agent-item:hover {
            border-color: var(--accent-blue);
            transform: translateY(-1px);
        }

        .agent-item.active {
            border-color: var(--accent-blue);
            background: rgba(88, 166, 255, 0.1);
        }

        .agent-name {
            font-weight: 600;
            color: var(--text-primary);
            margin-bottom: 4px;
        }

        .agent-description {
            font-size: 12px;
            color: var(--text-muted);
            margin-bottom: 6px;
        }

        .agent-status {
            display: flex;
            align-items: center;
            gap: 6px;
            font-size: 11px;
        }

        .agent-status.online { color: var(--accent-green); }
        .agent-status.offline { color: var(--accent-red); }

        /* Chat column header and the scrollable message list beneath it. */
        .chat-header {
            padding: 16px 20px;
            border-bottom: 1px solid var(--border-color);
            background: var(--bg-tertiary);
            flex-shrink: 0;
        }

        .chat-title {
            font-size: 16px;
            font-weight: 600;
            color: var(--text-primary);
            margin-bottom: 4px;
        }

        .chat-subtitle {
            font-size: 12px;
            color: var(--text-muted);
        }

        /* min-height: 0 lets the flex child shrink so its own scrollbar works. */
        .messages-container {
            flex: 1;
            overflow-y: auto;
            padding: 20px;
            display: flex;
            flex-direction: column;
            gap: 16px;
            min-height: 0;
        }

        /* Chat bubbles: user messages right-aligned in blue, agent messages
           left-aligned in the tertiary background. */
        .message {
            display: flex;
            gap: 12px;
            max-width: 85%;
        }

        .message.user {
            flex-direction: row-reverse;
            margin-left: auto;
        }

        .message-avatar {
            width: 36px;
            height: 36px;
            border-radius: 50%;
            display: flex;
            align-items: center;
            justify-content: center;
            font-size: 14px;
            font-weight: 600;
            flex-shrink: 0;
        }

        .message.user .message-avatar {
            background: var(--accent-blue);
            color: white;
        }

        .message.agent .message-avatar {
            background: var(--accent-green);
            color: white;
        }

        .message-content {
            padding: 12px 16px;
            border-radius: 16px;
            line-height: 1.5;
            font-size: 14px;
        }

        .message.user .message-content {
            background: var(--accent-blue);
            color: white;
        }

        .message.agent .message-content {
            background: var(--bg-tertiary);
            border: 1px solid var(--border-color);
            color: var(--text-primary);
        }

        /* NEW: Thinking step styles
           NOTE(review): third definition of .thinking-step in this sheet;
           being last, these values win over the "Enhanced Chat Integration"
           copy for the properties set here. The .outline-step variant below
           is byte-identical to an earlier copy and is redundant. */
        .thinking-step {
            background: var(--bg-secondary);
            border: 1px solid var(--accent-purple);
            border-radius: 12px;
            padding: 12px 16px;
            margin: 8px 0;
            font-size: 13px;
            color: var(--text-secondary);
        }

        .thinking-step.outline-step {
            border-color: var(--accent-cyan);
            background: rgba(57, 208, 216, 0.05);
        }

        .thinking-step-header {
            display: flex;
            align-items: center;
            gap: 8px;
            font-weight: 600;
            margin-bottom: 6px;
            color: var(--text-primary);
        }

        .thinking-step-content {
            line-height: 1.4;
        }

        /* Message composer bar pinned below the message list. */
        .message-input {
            border-top: 1px solid var(--border-color);
            padding: 16px 20px;
            display: flex;
            gap: 12px;
            flex-shrink: 0;
            background: var(--bg-secondary);
        }

        .input-field {
            flex: 1;
            background: var(--bg-primary);
            border: 1px solid var(--border-color);
            border-radius: 8px;
            padding: 12px;
            color: var(--text-primary);
            font-size: 14px;
        }

        .input-field:focus {
            outline: none;
            border-color: var(--accent-blue);
        }

        .send-button {
            background: var(--accent-blue);
            color: white;
            border: none;
            border-radius: 8px;
            padding: 12px 20px;
            cursor: pointer;
            font-weight: 600;
            transition: all 0.2s;
        }

        .send-button:hover:not(:disabled) {
            background: #4493f8;
            transform: translateY(-1px);
        }

        .send-button:disabled {
            opacity: 0.5;
            cursor: not-allowed;
            transform: none;
        }

        /* Progress-panel chrome: header, scrollable content, section titles. */
        .panel-header {
            padding: 16px;
            background: var(--bg-tertiary);
            border-bottom: 1px solid var(--border-color);
            font-weight: 600;
            font-size: 14px;
        }

        .panel-content {
            flex: 1;
            overflow-y: auto;
            padding: 16px;
            min-height: 0;
        }

        .progress-section {
            margin-bottom: 20px;
        }

        .section-title {
            font-size: 12px;
            font-weight: 600;
            color: var(--text-muted);
            text-transform: uppercase;
            margin-bottom: 8px;
            letter-spacing: 0.5px;
        }

        /* NEW: Enhanced progress item styles — event cards in the progress
           panel with per-state status pills and inline performance metrics. */
        .progress-item {
            background: var(--bg-primary);
            border: 1px solid var(--border-color);
            border-radius: 8px;
            padding: 12px;
            margin-bottom: 8px;
            font-size: 12px;
            transition: all 0.2s;
        }

        .progress-item:hover {
            border-color: var(--accent-blue);
            transform: translateY(-1px);
        }

        .progress-item-header {
            display: flex;
            align-items: center;
            gap: 8px;
            margin-bottom: 6px;
        }

        .progress-icon {
            width: 16px;
            text-align: center;
            font-size: 14px;
        }

        .progress-title {
            font-weight: 500;
            color: var(--text-primary);
            flex: 1;
        }

        .progress-status {
            padding: 2px 6px;
            border-radius: 3px;
            font-size: 10px;
            font-weight: 500;
        }

        .progress-status.running {
            background: var(--accent-orange);
            color: white;
        }

        .progress-status.completed {
            background: var(--accent-green);
            color: white;
        }

        .progress-status.error {
            background: var(--accent-red);
            color: white;
        }

        .progress-status.starting {
            background: var(--accent-cyan);
            color: white;
        }

        .progress-details {
            color: var(--text-secondary);
            font-size: 11px;
            line-height: 1.3;
        }

        .performance-metrics {
            background: rgba(88, 166, 255, 0.05);
            border: 1px solid rgba(88, 166, 255, 0.2);
            border-radius: 6px;
            padding: 8px;
            margin-top: 6px;
            font-size: 10px;
        }

        .performance-metrics .metric {
            display: flex;
            justify-content: space-between;
            margin-bottom: 2px;
        }
        }

        /* Empty-state placeholder and the animated "agent is typing" bubble. */
        .no-agent-selected {
            display: flex;
            align-items: center;
            justify-content: center;
            flex-direction: column;
            gap: 16px;
            height: 100%;
            color: var(--text-muted);
            text-align: center;
        }

        .no-agent-selected .icon {
            font-size: 48px;
            opacity: 0.5;
        }

        /* Hidden by default; shown via the .active modifier. */
        .typing-indicator {
            display: none;
            align-items: center;
            gap: 8px;
            padding: 12px 16px;
            background: var(--bg-tertiary);
            margin: 12px 20px;
            border-radius: 16px;
            font-size: 14px;
            color: var(--text-muted);
            flex-shrink: 0;
        }

        .typing-indicator.active { display: flex; }

        .typing-dots {
            display: flex;
            gap: 4px;
        }

        .typing-dot {
            width: 6px;
            height: 6px;
            border-radius: 50%;
            background: var(--text-muted);
            animation: typing 1.4s infinite;
        }

        /* Staggered delays create the wave effect across the three dots. */
        .typing-dot:nth-child(2) { animation-delay: 0.2s; }
        .typing-dot:nth-child(3) { animation-delay: 0.4s; }

        /* Each typing dot brightens briefly mid-cycle and stays dim for the
           rest (per-dot delays are set on .typing-dot:nth-child rules). */
        @keyframes typing {
            30%           { opacity: 1; }
            0%, 60%, 100% { opacity: 0.3; }
        }

        /* Sidebar footer info and the fixed-position error toast. */
        .system-info {
            margin-top: auto;
            padding: 12px;
            border-top: 1px solid var(--border-color);
            font-size: 11px;
            color: var(--text-muted);
            flex-shrink: 0;
        }

        /* Error toast anchored bottom-right above all panels. */
        .error-message {
            background: rgba(248, 81, 73, 0.1);
            border: 1px solid var(--accent-red);
            color: var(--accent-red);
            padding: 12px;
            border-radius: 6px;
            margin: 12px;
            font-size: 14px;
            position: fixed;
            bottom: 20px;
            right: 20px;
            z-index: 2000;
            max-width: 300px;
        }

        /* Event-detail modal: full-screen overlay (shown via .active) with a
           scrollable body listing event fields grouped into sections. */
        .event-detail-modal {
            position: fixed;
            top: 0;
            left: 0;
            right: 0;
            bottom: 0;
            background: rgba(0, 0, 0, 0.8);
            display: none;
            align-items: center;
            justify-content: center;
            z-index: 2000;
            padding: 20px;
        }

        .event-detail-modal.active {
            display: flex;
        }

        .event-detail-content {
            background: var(--bg-secondary);
            border: 1px solid var(--border-color);
            border-radius: 12px;
            max-width: 800px;
            max-height: 80vh;
            width: 100%;
            display: flex;
            flex-direction: column;
            overflow: hidden;
            box-shadow: var(--shadow);
        }

        .event-detail-header {
            padding: 20px 24px;
            border-bottom: 1px solid var(--border-color);
            background: var(--bg-tertiary);
            display: flex;
            align-items: center;
            justify-content: space-between;
            flex-shrink: 0;
        }

        .event-detail-title {
            font-size: 18px;
            font-weight: 600;
            color: var(--text-primary);
            display: flex;
            align-items: center;
            gap: 12px;
        }

        .event-detail-close {
            background: none;
            border: none;
            color: var(--text-muted);
            cursor: pointer;
            padding: 8px;
            border-radius: 6px;
            font-size: 20px;
            transition: all 0.2s;
        }

        .event-detail-close:hover {
            background: var(--bg-primary);
            color: var(--text-primary);
        }

        .event-detail-body {
            flex: 1;
            overflow-y: auto;
            padding: 24px;
            min-height: 0;
        }

        .event-section {
            margin-bottom: 24px;
        }

        .event-section-title {
            font-size: 14px;
            font-weight: 600;
            color: var(--accent-blue);
            text-transform: uppercase;
            letter-spacing: 0.5px;
            margin-bottom: 12px;
            padding-bottom: 6px;
            border-bottom: 1px solid var(--border-color);
        }

        /* NOTE(review): .event-field / .event-field-label / .event-field-value
           are also defined near the top of this sheet; these later copies win
           for the properties they repeat (e.g. min-width 140px over 80px). */
        .event-field {
            display: flex;
            justify-content: space-between;
            align-items: flex-start;
            padding: 8px 0;
            border-bottom: 1px solid rgba(48, 54, 61, 0.5);
            font-size: 14px;
        }

        .event-field:last-child {
            border-bottom: none;
        }

        .event-field-label {
            font-weight: 500;
            color: var(--text-secondary);
            min-width: 140px;
            flex-shrink: 0;
        }

        .event-field-value {
            color: var(--text-primary);
            flex: 1;
            text-align: right;
            word-break: break-word;
        }

        /* Third and final .event-field-value.json definition — wins on the
           shared properties (the earlier copy's border/word-break persist). */
        .event-field-value.json {
            background: var(--bg-primary);
            border-radius: 6px;
            padding: 8px;
            font-family: 'Consolas', 'Monaco', monospace;
            font-size: 12px;
            text-align: left;
            white-space: pre-wrap;
            max-height: 200px;
            overflow-y: auto;
        }

        .event-status-badge {
            padding: 4px 8px;
            border-radius: 4px;
            font-size: 12px;
            font-weight: 500;
        }

        .event-status-badge.completed {
            background: var(--accent-green);
            color: white;
        }

        .event-status-badge.running {
            background: var(--accent-orange);
            color: white;
        }

        .event-status-badge.failed {
            background: var(--accent-red);
            color: white;
        }

        .progress-item {
            cursor: pointer;
            transition: all 0.2s;
        }

        .progress-item:hover {
            transform: translateY(-2px);
            box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2);
        }

        .thinking-step {
            cursor: pointer;
            transition: all 0.2s;
        }

        .thinking-step:hover {
            transform: translateY(-1px);
            box-shadow: 0 2px 8px rgba(0, 0, 0, 0.15);
        }
    </style>
</head>
<body>

<div class="api-key-modal" id="api-key-modal">
    <div class="api-key-content">
        <div class="api-key-title">🔐 Enter API Key</div>
        <div class="api-key-description">
            Please enter your API key to access the agent. You can find this key in your agent registration details.
        </div>
        <input type="text" class="api-key-input" id="api-key-input"
               placeholder="tbk_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx">
        <button class="api-key-button" id="api-key-submit">Connect</button>
    </div>
</div>

<div class="header">
    <div class="logo">
        <span>🤖</span>
        <span>Agent Registry</span>
    </div>
    <div class="header-controls">
        <button class="panel-toggle active" id="sidebar-toggle">📋 Agents</button>
        <button class="panel-toggle active" id="progress-toggle">📊 Progress</button>
        <div class="status-indicator disconnected" id="connection-status">
            <div class="status-dot"></div>
            <span>Connecting...</span>
        </div>
    </div>
</div>

<div class="mobile-tabs">
    <div class="mobile-tab active" data-tab="chat">💬 Chat</div>
    <div class="mobile-tab" data-tab="agents">📋 Agents</div>
    <div class="mobile-tab" data-tab="progress">📊 Progress</div>
</div>

<div class="main-container">
    <!-- Agents Sidebar -->
    <div class="sidebar" id="sidebar">
        <div class="sidebar-header">
            <div class="sidebar-title">Available Agents</div>
            <button class="collapse-btn" id="sidebar-collapse">◀</button>
        </div>
        <div class="agents-list">
            <div id="agents-container">
                <div style="color: var(--text-muted); font-size: 12px; text-align: center; padding: 20px;">
                    Loading agents...
                </div>
            </div>
        </div>
        <div class="system-info">
            <div>Registry Server</div>
            <div id="server-info">ws://localhost:8080</div>
        </div>
    </div>

    <!-- Chat Area -->
    <div class="chat-area">
        <div class="chat-header">
            <div class="chat-title" id="chat-title">Select an Agent</div>
            <div class="chat-subtitle" id="chat-subtitle">Choose an agent from the sidebar to start chatting</div>
        </div>

        <div class="messages-container" id="messages-container">
            <div class="no-agent-selected">
                <div class="icon">💬</div>
                <div>Select an agent to start a conversation</div>
            </div>
        </div>

        <div class="typing-indicator" id="typing-indicator">
            <span>Agent is thinking</span>
            <div class="typing-dots">
                <div class="typing-dot"></div>
                <div class="typing-dot"></div>
                <div class="typing-dot"></div>
            </div>
        </div>

        <div class="message-input">
            <input type="text" class="input-field" id="message-input"
                   placeholder="Type your message..." disabled>
            <button class="send-button" id="send-button" disabled>Send</button>
        </div>
    </div>
    <!-- Progress Panel -->
    <div class="progress-panel" id="progress-panel">
        <div class="progress-header">
            <span>Live Progress</span>
            <button class="collapse-btn" id="progress-collapse">▶</button>
        </div>
        <div class="panel-content" id="progress-content">
            <div class="progress-section">
                <div class="section-title">Current Status</div>
                <div id="current-status">
                    <div style="color: var(--text-muted); font-size: 12px; text-align: center; padding: 20px;">
                        No active execution
                    </div>
                </div>
            </div>

            <div class="progress-section">
                <div class="section-title">Performance Metrics</div>
                <div id="performance-metrics">
                    <div style="color: var(--text-muted); font-size: 12px; text-align: center; padding: 10px;">
                        No metrics available
                    </div>
                </div>
            </div>

            <div class="progress-section">
                <div class="section-title">Meta Tools History</div>
                <div id="meta-tools-history">
                    <div style="color: var(--text-muted); font-size: 12px; text-align: center; padding: 10px;">
                        No meta-tool activity
                    </div>
                </div>
            </div>

            <div class="progress-section">
                <div class="section-title">System Events</div>
                <div id="system-events">
                    <div style="color: var(--text-muted); font-size: 12px; text-align: center; padding: 10px;">
                        System idle
                    </div>
                </div>
            </div>
        </div>
    </div>
</div>

<script unSave="true">



    class AgentRegistryUI {
        /**
         * Set up connection state, panel state, progress tracking and cached
         * DOM references, start the stuck-execution watchdog, then run init().
         */
        constructor() {
            // WebSocket connection state.
            this.ws = null;
            this.currentAgent = null;
            // Random per-page session id sent along with chat requests.
            this.sessionId = 'ui_session_' + Math.random().toString(36).substr(2, 9);
            this.isConnected = false;
            this.reconnectAttempts = 0;
            this.apiKey = null;
            this.maxReconnectAttempts = 10;
            this.reconnectDelay = 1000; // ms; presumably scaled on retry — confirm in reconnect logic (outside this chunk)

            // Which panels are open; `mobile` is the active mobile tab name.
            this.panelStates = {
                sidebar: true,
                progress: true,
                mobile: 'chat'
            };

            // agent name -> agent descriptor, populated from registry events.
            this.agents = new Map();
            this.currentExecution = null;

            // NEW: Enhanced progress tracking
            this.progressHistory = [];          // rolling event list, newest first
            this.maxProgressHistory = 200;      // soft cap on history length
            this.expandedProgressItem = null;   // id of the single expanded row
            this.currentPerformanceMetrics = null;
            this.currentOutline = null;

            // Cached DOM handles; all ids exist in the static markup above.
            this.elements = {
                connectionStatus: document.getElementById('connection-status'),
                agentsContainer: document.getElementById('agents-container'),
                chatTitle: document.getElementById('chat-title'),
                chatSubtitle: document.getElementById('chat-subtitle'),
                messagesContainer: document.getElementById('messages-container'),
                messageInput: document.getElementById('message-input'),
                sendButton: document.getElementById('send-button'),
                typingIndicator: document.getElementById('typing-indicator'),
                serverInfo: document.getElementById('server-info'),

                // API Key elements
                apiKeyModal: document.getElementById('api-key-modal'),
                apiKeyInput: document.getElementById('api-key-input'),
                apiKeySubmit: document.getElementById('api-key-submit'),

                // Panel control elements
                sidebarToggle: document.getElementById('sidebar-toggle'),
                progressToggle: document.getElementById('progress-toggle'),
                sidebarCollapse: document.getElementById('sidebar-collapse'),
                progressCollapse: document.getElementById('progress-collapse'),
                mainContainer: document.querySelector('.main-container'),
                sidebar: document.getElementById('sidebar'),
                progressPanel: document.getElementById('progress-panel'),
                progressContent: document.getElementById('progress-content')
            };

            // Enhanced cleanup timer: if an execution stops emitting events for
            // 30s, assume it is stuck and reset the typing indicator.
            // NOTE(review): this.isTyping is assigned elsewhere (presumably by
            // showTypingIndicator, which is outside this chunk) — confirm.
            setInterval(() => {
                if (this.isTyping && this.currentExecution) {
                    const timeSinceLastUpdate = Date.now() - this.currentExecution.lastUpdate;
                    if (timeSinceLastUpdate > 30000) {
                        console.log('🧹 Cleanup: Hiding stuck typing indicator');
                        this.showTypingIndicator(false);
                        this.currentExecution = null;
                        this.updateCurrentStatusToIdle();
                    }
                }
            }, 5000);

            this.init();
        }

        /**
         * Bootstrap sequence; order matters: event listeners and panel
         * controls must be wired before the panel is rendered, and the
         * API-key modal gates everything else.
         */
        init() {
            this.setupEventListeners();
            this.setupPanelControls();
            this.initializeProgressPanel();
            this.showApiKeyModal();
        }

        // NEW: Initialize the refactored progress panel
        initializeProgressPanel() {
            if (this.elements.progressContent) {
                this.elements.progressContent.innerHTML = `
                <div class="progress-section metrics-section">
                    <div class="section-title expandable-section" onclick="window.agentUI.toggleSection('metrics')">
                        <span>📊 Performance Metrics</span>
                        <span class="section-toggle">▼</span>
                    </div>
                    <div class="section-content" id="performance-metrics">
                        <div class="no-data">No metrics available</div>
                    </div>
                </div>

                <div class="progress-section outline-section">
                    <div class="section-title expandable-section" onclick="window.agentUI.toggleSection('outline')">
                        <span>🗺️ Execution Outline & Context</span>
                        <span class="section-toggle">▼</span>
                    </div>
                    <div class="section-content" id="execution-outline">
                        <div class="no-data">No outline available</div>
                    </div>
                </div>

                <div class="progress-section status-history-section">
                    <div class="section-title expandable-section" onclick="window.agentUI.toggleSection('status')">
                        <span>⚡ Status & History</span>
                        <span class="section-toggle">▼</span>
                    </div>
                    <div class="section-content expanded" id="status-history">
                        <div class="no-data">No active execution</div>
                    </div>
                </div>
            `;
            }
        }

        // NEW: Toggle progress panel sections
        toggleSection(sectionName) {
            const section = document.querySelector(`.${sectionName}-section .section-content`);
            const toggle = document.querySelector(`.${sectionName}-section .section-toggle`);

            if (!section || !toggle) return;

            const isExpanded = section.classList.contains('expanded');

            if (isExpanded) {
                section.classList.remove('expanded');
                toggle.textContent = '▼';
            } else {
                section.classList.add('expanded');
                toggle.textContent = '▲';
            }
        }

        // REFACTORED: Main message handler with unified progress system
        handleWebSocketMessage(data) {
            console.log('WebSocket message received:', data);

            if (data.event === 'execution_progress') {
                const executionData = data.data;
                if (executionData && executionData.payload) {
                    this.handleUnifiedProgressEvent(executionData);
                }
                return;
            }

            if (data.request_id && data.payload) {
                this.handleUnifiedProgressEvent(data);
                return;
            }

            if (data.event) {
                this.handleRegistryEvent(data);
                return;
            }

            console.log('Unhandled message format:', data);
        }

        // NEW: Unified progress event handler
        // REPLACE the existing handleUnifiedProgressEvent method
        handleUnifiedProgressEvent(eventData) {
            const payload = eventData.payload;
            const eventType = payload.event_type;
            const isFinal = eventData.is_final;
            const requestId = eventData.request_id;

            console.log(`🎯 Processing Event: ${eventType}`, payload);

            // Handle final events
            if (isFinal || eventType === 'execution_complete' || payload.status === 'completed') {
                this.showTypingIndicator(false);

                const result = payload.metadata?.result || payload.result || payload.response || payload.output;
                if (result && typeof result === 'string' && result.trim()) {
                    this.addMessage('agent', result);
                }

                this.currentExecution = null;
                this.updateCurrentStatusToIdle();
                return;
            }

            // Initialize execution tracking
            if (!this.currentExecution) {
                this.currentExecution = {
                    requestId,
                    startTime: Date.now(),
                    events: [],
                    lastUpdate: Date.now()
                };
                this.showTypingIndicator(true);
            }

            // ADD: Store ALL events in progress history
            this.addToProgressHistory(payload);

            // Handle chat integration for important events
            this.handleChatIntegration(payload);

            // Update performance metrics
            this.updatePerformanceMetricsFromEvent(payload);

            // Update execution outline
            this.updateExecutionOutlineFromEvent(payload);

            // Refresh status history (shows all events)
            this.refreshStatusHistory();

            // Update current execution
            if (this.currentExecution) {
                this.currentExecution.events.push({...payload, timestamp: Date.now()});
                this.currentExecution.lastUpdate = Date.now();
            }
        }

        // NEW: Add event to progress history
        // UPDATE the addToProgressHistory method to ensure all events are captured
        addToProgressHistory(payload) {
        const irrelevantEventTypes = ['node_phase', 'node_enter']; // Fügen Sie hier weitere Typen hinzu, falls nötig

    // Prüfen, ob der Event-Typ in der Liste der irrelevanten Typen ist
    if (irrelevantEventTypes.includes(payload.event_type)) {
        // Optional: Hier könnte man das Event kurz an anderer Stelle anzeigen,
        // aber wir speichern es nicht im langfristigen Verlauf.
        console.log(`📝 Skipping storage for irrelevant event: ${payload.event_type}`);
        return; // Die Funktion hier beenden, um das Speichern zu verhindern
    }

            // Generate consistent ID for events
            const eventId = payload.event_id || `event_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;

            const historyItem = {
                ...payload,
                timestamp: payload.timestamp || Date.now(),
                id: eventId
            };

            // Remove any existing event with same ID to avoid duplicates
            this.progressHistory = this.progressHistory.filter(item => item.id !== eventId);

            this.progressHistory.unshift(historyItem);

            if (this.progressHistory.length - 10 > this.maxProgressHistory) {
                this.progressHistory = this.progressHistory.slice(0, this.maxProgressHistory-50);
            }

            console.log(`📝 Added to progress history: ${payload.event_type}`, historyItem);
        }

        // NEW: Refresh unified status history display
        refreshStatusHistory() {
            const container = document.getElementById('status-history');
            if (!container) return;

            if (this.progressHistory.length === 0) {
                container.innerHTML = '<div class="no-data">No events recorded</div>';
                return;
            }

            container.innerHTML = '';

            this.progressHistory.forEach((event, index) => {
                const eventElement = this.createExpandableProgressItem(event, index === 0);
                container.appendChild(eventElement);
            });
        }

        // NEW: Create expandable progress item (only one expandable at a time)
// UPDATE the createExpandableProgressItem method to show more LLM details
        createExpandableProgressItem(event, isLatest = false) {
            const div = document.createElement('div');
            div.className = `progress-item expandable ${isLatest ? 'latest' : ''} ${event.event_type}`;
            div.setAttribute('data-event-id', event.id);

            const icon = this.getEventIcon(event.event_type, event.status);
            const title = this.getDisplayAction(event.event_type, event);
            const timestamp = new Date((event.timestamp || Date.now()) * (event.timestamp > 10000000000 ? 1 : 1000)).toLocaleTimeString();
            const status = event.status || 'unknown';

            // ADD: Special summary for LLM calls
            let summaryDetails = '';
            if (event.node_name) summaryDetails += `${event.node_name} • `;
            summaryDetails += timestamp;

            if (event.event_type === 'llm_call') {
                if (event.llm_temperature !== undefined) summaryDetails += ` • Temp: ${event.llm_temperature}`;
                if (event.llm_total_tokens) summaryDetails += ` • ${event.llm_total_tokens} tokens`;
                if (event.llm_cost) summaryDetails += ` • $${event.llm_cost.toFixed(4)}`;
                if (event.duration) summaryDetails += ` • ${event.duration.toFixed(2)}s`;
            } else {
                if (event.duration) summaryDetails += ` • ${event.duration.toFixed(2)}s`;
            }

            div.innerHTML = `
        <div class="progress-item-header" onclick="window.agentUI.toggleProgressItem('${event.id}')">
            <div class="progress-icon">${icon}</div>
            <div class="progress-title">${title}</div>
            <div class="progress-meta">
                <span class="progress-status ${status}">${status}</span>
                <span class="expand-indicator">▼</span>
            </div>
        </div>
        <div class="progress-summary">
            ${summaryDetails}
        </div>
        <div class="progress-item-expanded" id="expanded-${event.id}">
            ${this.createExpandedEventContent(event)}
        </div>
    `;

            return div;
        }
        // NEW: Toggle progress item (only one at a time)
        toggleProgressItem(eventId) {
            if (this.expandedProgressItem && this.expandedProgressItem !== eventId) {
                this.closeProgressItem(this.expandedProgressItem);
            }

            const expandedContent = document.getElementById(`expanded-${eventId}`);
            const progressItem = document.querySelector(`[data-event-id="${eventId}"]`);
            const indicator = progressItem?.querySelector('.expand-indicator');

            if (!expandedContent || !progressItem) return;

            const isExpanded = expandedContent.classList.contains('active');

            if (isExpanded) {
                expandedContent.classList.remove('active');
                progressItem.classList.remove('expanded');
                if (indicator) indicator.textContent = '▼';
                this.expandedProgressItem = null;
            } else {
                expandedContent.classList.add('active');
                progressItem.classList.add('expanded');
                if (indicator) indicator.textContent = '▲';
                this.expandedProgressItem = eventId;
            }
        }

        // NEW: Close progress item
        closeProgressItem(eventId) {
            const expandedContent = document.getElementById(`expanded-${eventId}`);
            const progressItem = document.querySelector(`[data-event-id="${eventId}"]`);
            const indicator = progressItem?.querySelector('.expand-indicator');

            if (expandedContent) expandedContent.classList.remove('active');
            if (progressItem) progressItem.classList.remove('expanded');
            if (indicator) indicator.textContent = '▼';
        }

        // NEW: Create detailed expanded content
// ADD this method to create comprehensive event details
        createExpandedEventContent(event) {
            const sections = [];

            // Core Information
            const coreInfo = this.extractCoreFields(event);
            if (Object.keys(coreInfo).length > 0) {
                sections.push(this.createEventSection('Core Information', coreInfo));
            }

            // Timing Information
            const timingInfo = this.extractTimingFields(event);
            if (Object.keys(timingInfo).length > 0) {
                sections.push(this.createEventSection('Timing & Status', timingInfo));
            }

            // LLM Information
            const llmInfo = this.extractLLMFields(event);
            if (Object.keys(llmInfo).length > 0) {
                sections.push(this.createEventSection('LLM Details', llmInfo));
            }

            // Tool Information
            const toolInfo = this.extractToolFields(event);
            if (Object.keys(toolInfo).length > 0) {
                sections.push(this.createEventSection('Tool Details', toolInfo));
            }

            // Performance Information
            const perfInfo = this.extractPerformanceFields(event);
            if (Object.keys(perfInfo).length > 0) {
                sections.push(this.createEventSection('Performance', perfInfo));
            }

            // Reasoning Context
            const reasoningInfo = this.extractReasoningFields(event);
            if (Object.keys(reasoningInfo).length > 0) {
                sections.push(this.createEventSection('Reasoning Context', reasoningInfo));
            }

            // Error Information
            const errorInfo = this.extractErrorFields(event);
            if (Object.keys(errorInfo).length > 0) {
                sections.push(this.createEventSection('Error Details', errorInfo));
            }

            // Raw Data
            const rawData = this.extractRawDataFields(event);
            if (Object.keys(rawData).length > 0) {
                sections.push(this.createEventSection('Raw Data', rawData));
            }

            return sections.join('') || '<div class="no-expanded-data">No detailed information available</div>';
        }

        // NEW: Create event section for expanded view
        createEventSection(title, fields) {
            const fieldsHtml = Object.entries(fields)
                .map(([key, value]) => {
                    if (typeof value === 'object' && value.type === 'json') {
                        return `
                        <div class="event-field">
                            <div class="event-field-label">${key}:</div>
                            <div class="event-field-value json">${value.value}</div>
                        </div>
                    `;
                    } else {
                        return `
                        <div class="event-field">
                            <div class="event-field-label">${key}:</div>
                            <div class="event-field-value">${value}</div>
                        </div>
                    `;
                    }
                })
                .join('');

            return `
            <div class="expanded-section">
                <div class="expanded-section-title">${title}</div>
                ${fieldsHtml}
            </div>
        `;
        }

        // ENHANCED: Chat integration for reasoning loops and task execution
        handleChatIntegration(payload) {
            const eventType = payload.event_type;
            const metadata = payload.metadata || {};
            switch (eventType) {
                case 'reasoning_loop':
                    if (metadata.outline_step && metadata.outline_total) {
                        this.handleOutlineStepInChat(payload);
                    }
                    break;
                case 'outline_created':
                    this.handleOutlineCreatedInChat(payload);
                    break;
                case 'task_start':
                case 'task_complete':
                case 'task_error':
                    this.handleTaskProgressInChat(payload);
                    break;
                case 'plan_created':
                    this.handlePlanCreatedInChat(payload);
                    break;
                case 'tool_call':
                    // Only show important tool calls in chat
                    if (payload.tool_name && !payload.tool_name.includes('internal')) {
                        this.handleToolCallInChat(payload);
                    }
                    break;
            }
        }

        // ADD this method for plan creation
handlePlanCreatedInChat(payload) {
    const metadata = payload.metadata || {};
    const planName = metadata.plan_name || 'Execution Plan';
    const taskCount = metadata.task_count || 0;
    const strategy = metadata.strategy || 'sequential';

    const planDiv = document.createElement('div');
    planDiv.className = 'thinking-step plan-created';
    planDiv.innerHTML = `
        <div class="thinking-step-header">
            <span>📋</span>
            <span>${planName} Created</span>
            <span class="step-status completed">Ready</span>
        </div>
        <div class="thinking-step-content">
            <div class="plan-details">
                <div class="plan-info">
                    <span class="plan-item">Tasks: ${taskCount}</span>
                    <span class="plan-item">Strategy: ${strategy}</span>
                </div>
                <div class="plan-ready">
                    <em>🚀 Plan ready for execution</em>
                </div>
            </div>
        </div>
    `;

    this.elements.messagesContainer.appendChild(planDiv);
    this.scrollToBottom();
}

        // ADD this new method for outline step progress
// Render one outline-step progress card in the chat: a progress bar for
// "step N of M" plus optional context-size / task-stack info. Skipped when
// the payload carries no usable step numbers.
handleOutlineStepInChat(payload) {
    const metadata = payload.metadata || {};
    const outlineStep = metadata.outline_step || 0;
    const outlineTotal = metadata.outline_total || 0;
    const loopNumber = metadata.loop_number || 0;  // NOTE(review): unused here — confirm intent
    const status = payload.status || 'running';

    // Nothing meaningful to show without both step and total.
    if (outlineStep === 0 || outlineTotal === 0) return;

    const progressPercentage = Math.round((outlineStep / outlineTotal) * 100);
    const isCompleted = status === 'completed';

    const stepDiv = document.createElement('div');
    stepDiv.className = `thinking-step outline-step ${isCompleted ? 'completed' : 'running'}`;

    let stepTitle = `Outline Step ${outlineStep} of ${outlineTotal}`;
    let stepIcon = isCompleted ? '✅' : '🗺️';
    let stepStatus = isCompleted ? 'Completed' : 'In Progress';

    stepDiv.innerHTML = `
        <div class="thinking-step-header">
            <span>${stepIcon}</span>
            <span>${stepTitle}</span>
            <span class="step-status ${status}">${stepStatus}</span>
        </div>
        <div class="thinking-step-content">
            <div class="outline-progress">
                <div class="progress-info">
                    <span class="progress-text">Execution Progress</span>
                    <span class="progress-percentage">${progressPercentage}%</span>
                </div>
                <div class="progress-bar-container">
                    <div class="progress-bar">
                        <div class="progress-bar-fill" style="width: ${progressPercentage}%"></div>
                    </div>
                </div>
                ${isCompleted ?
                    '<div class="step-completed">This execution step is now complete</div>' :
                    '<div class="step-working">Working on this step...</div>'
                }
            </div>

            ${metadata.context_size || metadata.task_stack_size ? `
                <div class="context-info">
                    ${metadata.context_size ? `<span class="context-item">Context: ${metadata.context_size}</span>` : ''}
                    ${metadata.task_stack_size ? `<span class="context-item">Tasks: ${metadata.task_stack_size}</span>` : ''}
                </div>
            ` : ''}
        </div>
    `;

    this.elements.messagesContainer.appendChild(stepDiv);
    this.scrollToBottom();
}


        // Handles outline-creation events: renders the new execution plan in the chat.
handleOutlineCreatedInChat(payload) {
    const metadata = payload.metadata || {};
    const outline = metadata.outline;

    if (!outline) return;

    const outlineDiv = document.createElement('div');
    outlineDiv.className = 'thinking-step outline-created';
    outlineDiv.innerHTML = `
        <div class="thinking-step-header">
            <span>📋</span>
            <span>Execution Plan Created</span>
            <span class="step-status completed">Ready</span>
        </div>
        <div class="thinking-step-content">
            <div class="outline-content">
                ${this.formatOutlineForChat(outline)}
            </div>
            <div class="outline-ready">
                <em>✨ Ready to execute plan step by step</em>
            </div>
        </div>
    `;

    this.elements.messagesContainer.appendChild(outlineDiv);
    this.scrollToBottom();
}

        // NEW: Handle task execution progress cleanly
        handleTaskProgressInChat(payload) {
            const eventType = payload.event_type;
            const taskId = payload.task_id;
            const metadata = payload.metadata || {};
            const description = metadata.description || 'Task execution';
            const taskType = metadata.type || 'Task';
            const priority = metadata.priority || 'normal';

            let icon = '📋';
            let status = '';
            let statusClass = 'running';

            if (eventType === 'task_start') {
                icon = '▶️';
                status = 'Starting';
                statusClass = 'starting';
            } else if (eventType === 'task_complete') {
                icon = '✅';
                status = 'Completed';
                statusClass = 'completed';
            } else if (eventType === 'task_error') {
                icon = '❌';
                status = 'Failed';
                statusClass = 'error';
            }

            const taskDiv = document.createElement('div');
            taskDiv.className = `thinking-step task-progress ${statusClass}`;
            taskDiv.innerHTML = `
            <div class="thinking-step-header">
                <span>${icon}</span>
                <span>${taskType} ${status}</span>
                <span class="priority-badge ${priority}">${priority}</span>
            </div>
            <div class="thinking-step-content">
                <div class="task-description">${description}</div>
                ${payload.duration ? `<div class="task-timing">Duration: ${payload.duration.toFixed(2)}s</div>` : ''}
                ${eventType === 'task_error' && payload.error_details?.message ?
                `<div class="task-error">Error: ${payload.error_details.message}</div>` : ''}
            </div>
        `;

            this.elements.messagesContainer.appendChild(taskDiv);
            this.scrollToBottom();
        }

        // NEW: Handle tool calls in chat
        handleToolCallInChat(payload) {
            const toolName = payload.tool_name;
            const status = payload.status;

            if (status === 'running') return; // Only show completed tool calls

            const toolDiv = document.createElement('div');
            toolDiv.className = `thinking-step tool-call ${status}`;
            toolDiv.innerHTML = `
            <div class="thinking-step-header">
                <span>🔧</span>
                <span>Used ${toolName}</span>
                <span class="tool-status ${status}">${status}</span>
            </div>
            <div class="thinking-step-content">
                <div class="tool-result">
                    ${status === 'completed' ? 'Tool executed successfully' : 'Tool execution failed'}
                    ${payload.duration ? ` in ${payload.duration.toFixed(2)}s` : ''}
                </div>
            </div>
        `;

            this.elements.messagesContainer.appendChild(toolDiv);
            this.scrollToBottom();
        }

        // NEW: Format outline for chat display
        formatOutlineForChat(outline) {
            if (typeof outline === 'string') {
                return `<div class="outline-text">${outline}</div>`;
            }

            if (Array.isArray(outline)) {
                return `
                <div class="outline-steps">
                    ${outline.map((step, index) =>
                    `<div class="outline-step">
                            <span class="step-number">${index + 1}.</span>
                            <span class="step-text">${step}</span>
                        </div>`
                ).join('')}
                </div>
            `;
            }

            return '<div class="outline-text">Execution plan created</div>';
        }

        // NEW: Create progress bar
        createProgressBar(current, total) {
            if (!total || total === 0) return '';

            const percentage = Math.round((current / total) * 100);

            return `
            <div class="progress-bar-container">
                <div class="progress-bar-info">
                    <span>Progress</span>
                    <span>${percentage}%</span>
                </div>
                <div class="progress-bar">
                    <div class="progress-bar-fill" style="width: ${percentage}%"></div>
                </div>
            </div>
        `;
        }

        // ENHANCED: Update performance metrics
        updatePerformanceMetricsFromEvent(payload) {
            const metadata = payload.metadata || {};
            const performance = metadata.performance_metrics;

            if (performance && Object.keys(performance).length > 0) {
                this.currentPerformanceMetrics = performance;
                this.refreshPerformanceMetrics();
            }
        }

        // NEW: Refresh performance metrics display
        refreshPerformanceMetrics() {
            const container = document.getElementById('performance-metrics');
            if (!container || !this.currentPerformanceMetrics) return;

            const metrics = {
                'Action Efficiency': `${Math.round((this.currentPerformanceMetrics.action_efficiency || 0) * 100)}%`,
                'Avg Loop Time': `${(this.currentPerformanceMetrics.avg_loop_time || 0).toFixed(1)}s`,
                'Progress Rate': `${Math.round((this.currentPerformanceMetrics.progress_rate || 0) * 100)}%`,
                'Total Loops': this.currentPerformanceMetrics.total_loops || 0,
                'Progress Loops': this.currentPerformanceMetrics.progress_loops || 0
            };

            container.innerHTML = `
            <div class="metrics-grid">
                ${Object.entries(metrics).map(([key, value]) => `
                    <div class="metric-card">
                        <div class="metric-label">${key}</div>
                        <div class="metric-value">${value}</div>
                    </div>
                `).join('')}
            </div>
        `;
        }

        // NEW: Update execution outline
        updateExecutionOutlineFromEvent(payload) {
            const eventType = payload.event_type;
            const metadata = payload.metadata || {};

            if (eventType === 'outline_created' || eventType === 'reasoning_loop') {
                const outlineContainer = document.getElementById('execution-outline');
                if (!outlineContainer) return;

                const outline = metadata.outline;
                const outlineStep = metadata.outline_step || 0;
                const outlineTotal = metadata.outline_total || 0;
                const contextSize = metadata.context_size || 0;
                const taskStackSize = metadata.task_stack_size || 0;

                outlineContainer.innerHTML = `
                <div class="outline-info">
                    <div class="context-metrics">
                        <div class="context-metric">
                            <span class="context-label">Context Size:</span>
                            <span class="context-value">${contextSize}</span>
                        </div>
                        <div class="context-metric">
                            <span class="context-label">Task Stack:</span>
                            <span class="context-value">${taskStackSize}</span>
                        </div>
                        <div class="context-metric">
                            <span class="context-label">Progress:</span>
                            <span class="context-value">${outlineStep}/${outlineTotal}</span>
                        </div>
                    </div>

                    ${outlineTotal > 0 ? this.createProgressBar(outlineStep, outlineTotal) : ''}
                </div>

                ${outline ? `
                    <div class="outline-details">
                        <div class="outline-title">Current Plan</div>
                        ${this.formatOutlineForChat(outline)}
                    </div>
                ` : ''}
            `;
            }
        }

        // Helper methods for field extraction (using existing implementations)
        extractCoreFields(event) {
            const fields = {};
            if (event.event_type) fields['Event Type'] = event.event_type.replace(/_/g, ' ').toUpperCase();
            if (event.node_name) fields['Node'] = event.node_name;
            if (event.agent_name) fields['Agent'] = event.agent_name;
            if (event.task_id) fields['Task ID'] = event.task_id;
            if (event.plan_id) fields['Plan ID'] = event.plan_id;
            if (event.timestamp) fields['Timestamp'] = new Date((event.timestamp > 10000000000 ? event.timestamp : event.timestamp * 1000)).toLocaleString();
            return fields;
        }

// Extracts LLM-related display fields from an event.
        extractLLMFields(event) {
            const fields = {};
            const metadata = event.metadata || {};

            if (event.llm_model) fields['Model'] = event.llm_model;
            if (event.llm_temperature !== undefined) fields['Temperature'] = event.llm_temperature;
            if (event.llm_prompt_tokens) fields['Prompt Tokens'] = event.llm_prompt_tokens.toLocaleString();
            if (event.llm_completion_tokens) fields['Completion Tokens'] = event.llm_completion_tokens.toLocaleString();
            if (event.llm_total_tokens) fields['Total Tokens'] = event.llm_total_tokens.toLocaleString();
            if (event.llm_cost) fields['Cost'] = `$${event.llm_cost.toFixed(4)}`;

            // ADD: Model preferences and metadata
            if (metadata.model_preference) fields['Model Preference'] = metadata.model_preference;

            return fields;
        }

        extractToolFields(event) {
            const fields = {};
            const metadata = event.metadata || {};

            if (event.tool_name) fields['Tool Name'] = event.tool_name;
            if (metadata.meta_tool_name) fields['Meta Tool Name'] = metadata.meta_tool_name;

            if (event.is_meta_tool !== null && event.is_meta_tool !== undefined) {
                fields['Is Meta Tool'] = event.is_meta_tool ? '✅ Yes' : '❌ No';
            }

            // ADD: Tool execution details
            if (metadata.execution_phase) fields['Execution Phase'] = metadata.execution_phase;
            if (metadata.reasoning_loop) fields['Reasoning Loop'] = metadata.reasoning_loop;
            if (metadata.parsed_args && metadata.parsed_args.confidence_level) {
                fields['Confidence Level'] = `${Math.round(metadata.parsed_args.confidence_level * 100)}%`;
            }

            return fields;
        }

// Helper methods for comprehensive event-data extraction.
        extractTimingFields(event) {
            const fields = {};

            if (event.status) {
                fields['Status'] = `<span class="event-status-badge ${event.status}">${event.status.toUpperCase()}</span>`;
            }
            if (event.success !== null && event.success !== undefined) {
                fields['Success'] = event.success ? '✅ Yes' : '❌ No';
            }
            if (event.timestamp) {
                fields['Timestamp'] = new Date((event.timestamp > 10000000000 ? event.timestamp : event.timestamp * 1000)).toLocaleString();
            }
            if (event.duration) {
                fields['Duration'] = `${event.duration.toFixed(3)}s`;
            }
            if (event.node_duration) {
                fields['Node Duration'] = `${event.node_duration.toFixed(3)}s`;
            }
            if (event.routing_decision) {
                fields['Next Step'] = event.routing_decision;
            }

            return fields;
        }

        extractErrorFields(event) {
            const fields = {};

            if (event.error_details) {
                const errorDetails = event.error_details;
                if (errorDetails.message) fields['Error Message'] = errorDetails.message;
                if (errorDetails.type) fields['Error Type'] = errorDetails.type;
                if (errorDetails.traceback) {
                    fields['Traceback'] = {
                        type: 'json',
                        value: errorDetails.traceback
                    };
                }
            }

            if (event.tool_error) {
                fields['Tool Error'] = event.tool_error;
            }

            return fields;
        }
// Extracts raw payload data (LLM prompt/response, tool args/results) for display.
        extractRawDataFields(event) {
            const fields = {};

            // ADD: Full LLM Input/Output for LLM calls
            if (event.event_type === 'llm_call') {
                if (event.llm_input) {
                    fields['LLM Input (Full Prompt)'] = {
                        type: 'json',
                        value: event.llm_input
                    };
                }

                if (event.llm_output) {
                    fields['LLM Output (Response)'] = {
                        type: 'json',
                        value: event.llm_output
                    };
                }
            }

            // Show other raw data for tool calls
            if (event.tool_args && typeof event.tool_args === 'object') {
                fields['Tool Arguments'] = {
                    type: 'json',
                    value: JSON.stringify(event.tool_args, null, 2)
                };
            }

            if (event.tool_result) {
                const resultStr = typeof event.tool_result === 'string' ?
                    event.tool_result :
                    JSON.stringify(event.tool_result, null, 2);

                fields['Tool Result'] = {
                    type: 'json',
                    value: resultStr.length > 1000 ?
                        resultStr.substring(0, 1000) + '\\n\\n... [truncated]' :
                        resultStr
                };
            }

            return fields;
        }

        extractPerformanceFields(event) {
            const fields = {};
            const metadata = event.metadata || {};
            const performance = metadata.performance_metrics || {};

            if (performance.action_efficiency) fields['Action Efficiency'] = `${Math.round(performance.action_efficiency * 100)}%`;
            if (performance.avg_loop_time) fields['Avg Loop Time'] = `${performance.avg_loop_time.toFixed(2)}s`;
            if (performance.progress_rate) fields['Progress Rate'] = `${Math.round(performance.progress_rate * 100)}%`;

            return fields;
        }

        extractReasoningFields(event) {
            const fields = {};
            const metadata = event.metadata || {};

            if (metadata.outline_step && metadata.outline_total) {
                fields['Outline Progress'] = `${metadata.outline_step}/${metadata.outline_total}`;
            }
            if (metadata.loop_number) fields['Loop Number'] = metadata.loop_number;
            if (metadata.context_size) fields['Context Size'] = metadata.context_size.toLocaleString();
            if (metadata.task_stack_size) fields['Task Stack Size'] = metadata.task_stack_size;

            return fields;
        }

        extractMetadata(event) {
            const fields = {};
            const metadata = event.metadata || {};

            // Show complex data as JSON
            const complexFields = ['tool_args', 'tool_result', 'llm_input', 'llm_output', 'error_details'];

            for (const field of complexFields) {
                if (event[field] && typeof event[field] === 'object') {
                    fields[field.replace(/_/g, ' ').toUpperCase()] = {
                        type: 'json',
                        value: JSON.stringify(event[field], null, 2)
                    };
                }
            }

            return fields;
        }

        // Display helpers for summarizing events in the progress stream.
        getDisplayAction(eventType, payload) {
            const metadata = payload.metadata || {};
            switch (eventType) {
                case 'reasoning_loop':
                    const step = metadata.outline_step || 0;
                    const total = metadata.outline_total || 0;
                    return step > 0 ? `Reasoning Step ${step}/${total}` : 'Deep Reasoning';
                case 'task_start':
                    return `Starting: ${metadata.description || 'Task'}`;
                case 'task_complete':
                    return `Completed: ${metadata.description || 'Task'}`;
                case 'task_error':
                    return `Failed: ${metadata.description || 'Task'}`;
                case 'tool_call':
                    const status = payload.status || 'running';
                    const toolName = payload.tool_name || 'Unknown Tool';
                    return `${status === 'running' ? 'Calling' : 'Called'} ${toolName}`;

                case 'llm_call':
                    const llmStatus = payload.status || 'running';
                    const model = payload.llm_model || 'LLM';
                    const taskId = payload.task_id || '';

                    // Show more context for LLM calls
                    let displayText = `${llmStatus === 'running' ? '🔄 Calling' : '✅ Called'} ${model}`;
                    if (taskId && taskId !== 'unknown') {
                        displayText += ` (${taskId})`;
                    }
                    return displayText;
                case 'plan_created':
                    return `Plan: ${metadata.plan_name || 'Execution Plan'}`;
                case 'outline_created':
                    return 'Execution Outline Created';
                case 'node_enter':
                    return `Started: ${payload.node_name || 'Processing'}`;
                case 'node_exit':
                    return `Finished: ${payload.node_name || 'Processing'}`;
                case 'node_phase':
                    return `${payload.node_name || 'Node'}: ${payload.node_phase || 'Processing'}`;
                case 'execution_start':
                    return 'Execution Started';
                case 'execution_complete':
                    return 'Execution Complete';
                // ADD: Meta tool events
                case 'meta_tool_call':
                    const metaToolName = metadata.meta_tool_name || payload.tool_name || 'Meta Tool';
                    const metaStatus = payload.status || 'running';
                    return `${metaStatus === 'running' ? 'Using' : 'Used'} ${metaToolName.replace(/_/g, ' ')}`;
                // ADD: Error events
                case 'error':
                    return `Error in ${payload.node_name || 'System'}`;
                default:
                    return eventType.replace(/_/g, ' ').replace(/\b\w/g, l => l.toUpperCase());
            }
        }

        // Maps event types (and terminal statuses) to display icons.
        getEventIcon(eventType, status) {
            if (status === 'error' || status === 'failed') return '❌';
            if (status === 'completed') return '✅';

            switch (eventType) {
                case 'reasoning_loop': return '🧠';
                case 'task_start': return '▶️';
                case 'task_complete': return '✅';
                case 'task_error': return '❌';
                case 'tool_call': return '🔧';
                case 'llm_call': return '💭';
                case 'plan_created': return '📋';
                case 'outline_created': return '🗺️';
                case 'node_enter': return '🚀';
                case 'node_exit': return '🏁';
                case 'node_phase': return '⚙️';
                case 'execution_start': return '🎬';
                case 'execution_complete': return '🎉';
                case 'meta_tool_call': return '🛠️';
                case 'error': return '🚨';
                default: return '⚡';
            }
        }

        updateCurrentStatusToIdle() {
            const container = document.getElementById('status-history');
            if (container && container.children.length === 0) {
                container.innerHTML = `
                <div class="progress-item idle-status">
                    <div class="progress-item-header">
                        <div class="progress-icon">💤</div>
                        <div class="progress-title">Ready & Waiting</div>
                        <div class="progress-meta">
                            <span class="progress-status idle">idle</span>
                        </div>
                    </div>
                    <div class="progress-summary">
                        Agent ready for next message • ${new Date().toLocaleTimeString()}
                    </div>
                </div>
            `;
            }
        }

        showTypingIndicator(show) {
            console.log(`💭 ${show ? 'Showing' : 'Hiding'} typing indicator`);
            this.elements.typingIndicator.classList.toggle('active', show);
            if (show) {
                this.elements.typingIndicator.scrollIntoView({ behavior: 'smooth', block: 'end' });
            }
            this.isTyping = show;
        }

        scrollToBottom() {
            if (this.elements.messagesContainer) {
                this.elements.messagesContainer.scrollTop = this.elements.messagesContainer.scrollHeight;
            }
        }

        showApiKeyModal() {
            const storedKey = localStorage.getItem('agent_registry_api_key');
            if (storedKey) {
                this.apiKey = storedKey;
                this.elements.apiKeyModal.style.display = 'none';
                this.connect();
            } else {
                this.elements.apiKeyModal.style.display = 'flex';
            }
        }

        async validateAndStoreApiKey() {
            const apiKey = this.elements.apiKeyInput.value.trim();
            if (!apiKey) {
                this.showError('Please enter an API key');
                return;
            }

            if (!apiKey.startsWith('tbk_')) {
                this.showError('Invalid API key format (should start with tbk_)');
                return;
            }

            this.apiKey = apiKey;
            this.elements.apiKeyModal.style.display = 'none';
            this.connect();
        }

        setupPanelControls() {
            this.elements.sidebarToggle?.addEventListener('click', () => this.togglePanel('sidebar'));
            this.elements.progressToggle?.addEventListener('click', () => this.togglePanel('progress'));
            this.elements.sidebarCollapse?.addEventListener('click', () => this.togglePanel('sidebar'));
            this.elements.progressCollapse?.addEventListener('click', () => this.togglePanel('progress'));

            const mobileTabs = document.querySelectorAll('.mobile-tab');
            if (mobileTabs.length > 0) {
                mobileTabs.forEach(tab => {
                    tab.addEventListener('click', () => this.switchMobileTab(tab.dataset.tab));
                });
            }

            this.setupResponsiveHandlers();
        }

        togglePanel(panel) {
            this.panelStates[panel] = !this.panelStates[panel];
            this.updatePanelStates();
        }

        updatePanelStates() {
            const { sidebar, progress } = this.panelStates;

            if (this.elements.mainContainer) {
                this.elements.mainContainer.classList.remove('sidebar-collapsed', 'progress-collapsed', 'both-collapsed');
                if (!sidebar && !progress) {
                    this.elements.mainContainer.classList.add('both-collapsed');
                } else if (!sidebar) {
                    this.elements.mainContainer.classList.add('sidebar-collapsed');
                } else if (!progress) {
                    this.elements.mainContainer.classList.add('progress-collapsed');
                }
            }

            if (this.elements.sidebar) this.elements.sidebar.classList.toggle('collapsed', !sidebar);
            if (this.elements.progressPanel) this.elements.progressPanel.classList.toggle('collapsed', !progress);

            if (this.elements.sidebarToggle) {
                this.elements.sidebarToggle.classList.toggle('active', sidebar);
                this.elements.sidebarToggle.textContent = sidebar ? '📋 Agents' : '📋';
            }

            if (this.elements.progressToggle) {
                this.elements.progressToggle.classList.toggle('active', progress);
                this.elements.progressToggle.textContent = progress ? '📊 Progress' : '📊';
            }

            if (this.elements.sidebarCollapse) this.elements.sidebarCollapse.textContent = sidebar ? '◀' : '▶';
            if (this.elements.progressCollapse) this.elements.progressCollapse.textContent = progress ? '▶' : '◀';

            if (this.elements.mainContainer) this.elements.mainContainer.offsetHeight;
        }

        handleWindowResize() {
            const chatArea = document.querySelector('.chat-area');
            const mainContainer = this.elements.mainContainer;

            if (chatArea && mainContainer) {
                const currentDisplay = mainContainer.style.display;
                mainContainer.style.display = 'none';
                mainContainer.offsetHeight;
                mainContainer.style.display = currentDisplay || '';
            }
        }

        switchMobileTab(tab) {
            this.panelStates.mobile = tab;

            const mobileTabs = document.querySelectorAll('.mobile-tab');
            if (mobileTabs.length > 0) {
                mobileTabs.forEach(t => t.classList.toggle('active', t.dataset.tab === tab));
            }

            const sidebarEl = document.querySelector('.sidebar');
            const chatAreaEl = document.querySelector('.chat-area');
            const progressPanelEl = document.querySelector('.progress-panel');

            if (sidebarEl) sidebarEl.style.display = tab === 'agents' ? 'flex' : 'none';
            if (chatAreaEl) chatAreaEl.style.display = tab === 'chat' ? 'flex' : 'none';
            if (progressPanelEl) progressPanelEl.style.display = tab === 'progress' ? 'flex' : 'none';
        }

        setupResponsiveHandlers() {
            const mediaQuery = window.matchMedia('(max-width: 768px)');
            const handleResponsive = (e) => {
                if (e.matches) {
                    this.switchMobileTab(this.panelStates.mobile);
                } else {
                    const panels = document.querySelectorAll('.sidebar, .chat-area, .progress-panel');
                    panels.forEach(panel => { if (panel) panel.style.display = ''; });
                }
            };

            if (mediaQuery.addEventListener) {
                mediaQuery.addEventListener('change', handleResponsive);
            } else {
                mediaQuery.addListener(handleResponsive);
            }
            handleResponsive(mediaQuery);
        }

        setupEventListeners() {
            this.elements.apiKeySubmit?.addEventListener('click', () => this.validateAndStoreApiKey());
            window.addEventListener('resize', () => this.handleWindowResize());
            this.elements.apiKeyInput?.addEventListener('keypress', (e) => {
                if (e.key === 'Enter') this.validateAndStoreApiKey();
            });
            this.elements.sendButton.addEventListener('click', () => this.sendMessage());
            this.elements.messageInput.addEventListener('keypress', (e) => {
                if (e.key === 'Enter' && !e.shiftKey && this.currentAgent) {
                    e.preventDefault();
                    this.sendMessage();
                }
            });

            document.addEventListener('visibilitychange', () => {
                if (!document.hidden && (!this.ws || this.ws.readyState === WebSocket.CLOSED)) {
                    this.connect();
                }
            });
        }

        connect() {
            if (this.ws && this.ws.readyState === WebSocket.OPEN) return;

            this.updateConnectionStatus('connecting', 'Connecting...');

            try {
                const isLocal = window.location.hostname === 'localhost' || window.location.hostname === '127.0.0.1';
                const wsProtocol = isLocal ? 'ws' : 'wss';
                const wsUrl = `${wsProtocol}://${window.location.host}/ws/registry/ui_connect`;
                this.ws = new WebSocket(wsUrl);

                this.ws.onopen = () => {
                    this.isConnected = true;
                    this.reconnectAttempts = 0;
                    this.updateConnectionStatus('connected', 'Connected');
                    console.log('Connected to Registry Server');
                };

                this.ws.onmessage = (event) => {
                    try {
                        const data = JSON.parse(event.data);
                        this.handleWebSocketMessage(data);
                    } catch (error) {
                        console.error('Message parse error:', error);
                    }
                };

                this.ws.onclose = () => {
                    this.isConnected = false;
                    this.updateConnectionStatus('disconnected', 'Disconnected');
                    this.scheduleReconnection();
                };

                this.ws.onerror = (error) => {
                    console.error('WebSocket error:', error);
                    this.updateConnectionStatus('error', 'Connection Error');
                };

            } catch (error) {
                console.error('Connection error:', error);
                this.updateConnectionStatus('error', 'Connection Failed');
                this.scheduleReconnection();
            }
        }

        scheduleReconnection() {
            if (this.reconnectAttempts >= this.maxReconnectAttempts) {
                this.updateConnectionStatus('error', 'Connection Failed (Max attempts reached)');
                return;
            }

            this.reconnectAttempts++;
            const delay = Math.min(this.reconnectDelay * this.reconnectAttempts, 30000);

            this.updateConnectionStatus('connecting', `Reconnecting in ${delay/1000}s (attempt ${this.reconnectAttempts})`);

            setTimeout(() => {
                if (!this.isConnected) this.connect();
            }, delay);
        }

        updateConnectionStatus(status, text) {
            this.elements.connectionStatus.className = `status-indicator ${status}`;
            this.elements.connectionStatus.querySelector('span').textContent = text;
        }

        handleRegistryEvent(data) {
            const event = data.event;
            const payload = data.data || data;

            console.log(`📋 Registry Event: ${event}`, payload);

            switch (event) {
                case 'api_key_validation':
                    if (payload.valid) {
                        console.log('✅ API key validated successfully');
                    } else {
                        this.showError('❌ Invalid API key for this agent');
                        this.currentAgent = null;
                        this.elements.messageInput.disabled = true;
                        this.elements.sendButton.disabled = true;
                    }
                    break;
                case 'agents_list':
                    console.log('📝 Updating agents list:', payload.agents);
                    this.updateAgentsList(payload.agents);
                    break;
                case 'agent_registered':
                    console.log('🆕 Agent registered:', payload);
                    this.addAgent(payload);
                    break;
                case 'error':
                    console.error('❌ WebSocket error:', payload);
                    this.showError(payload.error || payload.message || 'Unknown error');
                    break;
                default:
                    console.log('❓ Unhandled registry event:', event, payload);
            }
        }

        updateAgentsList(agents) {
            this.elements.agentsContainer.innerHTML = '';

            if (!agents || agents.length === 0) {
                this.elements.agentsContainer.innerHTML = '<div style="color: var(--text-muted); font-size: 12px; text-align: center; padding: 20px;">No agents available</div>';
                return;
            }

            agents.forEach(agent => {
                this.agents.set(agent.public_agent_id, agent);
                const agentEl = this.createAgentElement(agent);
                this.elements.agentsContainer.appendChild(agentEl);
            });
        }

        createAgentElement(agent) {
            const div = document.createElement('div');
            div.className = 'agent-item';
            div.dataset.agentId = agent.public_agent_id;

            div.innerHTML = `
            <div class="agent-name">${agent.public_name}</div>
            <div class="agent-description">${agent.description || 'No description'}</div>
            <div class="agent-status ${agent.status}">
                <div class="status-dot"></div>
                <span>${agent.status.toUpperCase()}</span>
            </div>
        `;

            div.addEventListener('click', () => this.selectAgent(agent));
            return div;
        }

        selectAgent(agent) {
            // An API key must be set before any agent can be selected.
            if (!this.apiKey) {
                this.showError('Please set your API key first');
                return;
            }

            // Ask the server to validate the key; handleRegistryEvent reacts to
            // the 'api_key_validation' reply and undoes this selection on failure.
            this.sendWebSocketMessage({
                event: 'validate_api_key',
                data: { public_agent_id: agent.public_agent_id, api_key: this.apiKey }
            });

            // Move the 'active' highlight to the chosen sidebar entry.
            document.querySelectorAll('.agent-item').forEach(el => el.classList.remove('active'));
            document.querySelector(`[data-agent-id="${agent.public_agent_id}"]`)?.classList.add('active');

            this.currentAgent = agent;
            this.elements.chatTitle.textContent = agent.public_name;
            this.elements.chatSubtitle.textContent = agent.description || 'Ready for conversation';

            // Enable the composer optimistically; a failed validation disables it again.
            this.elements.messageInput.disabled = false;
            this.elements.sendButton.disabled = false;

            // Start a fresh conversation view with a greeting from the agent.
            this.elements.messagesContainer.innerHTML = '';
            this.addMessage('agent', `Hello! I'm ${agent.public_name}. How can I help you?`);

            // Subscribe to this agent's event stream and request its current status.
            this.sendWebSocketMessage({
                event: 'subscribe_agent',
                data: { public_agent_id: agent.public_agent_id }
            });

            this.sendWebSocketMessage({
                event: 'get_agent_status',
                data: { public_agent_id: agent.public_agent_id }
            });

            // Reset progress panels
            this.progressHistory = [];
            this.refreshStatusHistory();
            const metricsContainer = document.getElementById('performance-metrics');
            if (metricsContainer) metricsContainer.innerHTML = '<div class="no-data">No metrics available</div>';
            const outlineContainer = document.getElementById('execution-outline');
            if (outlineContainer) outlineContainer.innerHTML = '<div class="no-data">No outline available</div>';
        }

        sendMessage() {
            if (!this.currentAgent || !this.elements.messageInput.value.trim()) return;

            const message = this.elements.messageInput.value.trim();
            this.addMessage('user', message);

            this.sendWebSocketMessage({
                event: 'chat_message',
                data: {
                    public_agent_id: this.currentAgent.public_agent_id,
                    message: message,
                    session_id: this.sessionId,
                    api_key: this.apiKey
                }
            });

            this.elements.messageInput.value = '';

            // Reset progress state
            this.progressHistory = [];
            this.expandedProgressItem = null;
            this.refreshStatusHistory();

            // Failsafe timeout
            setTimeout(() => {
                if (this.currentExecution) {
                    console.log('⏰ Timeout: Hiding typing indicator and resetting execution state');
                    this.showTypingIndicator(false);
                    this.currentExecution = null;
                    this.updateCurrentStatusToIdle();
                    this.showError('Agent response timeout - please try again');
                }
            }, 60000);
        }

        addMessage(sender, content) {
            const messageDiv = document.createElement('div');
            messageDiv.classList.add('message', sender);

            const avatar = document.createElement('div');
            avatar.classList.add('message-avatar');
            avatar.textContent = sender === 'user' ? 'U' : 'AI';

            const contentDiv = document.createElement('div');
            contentDiv.classList.add('message-content');

            if (sender === 'agent' && window.marked) {
                try {
                    contentDiv.innerHTML = marked.parse(content);
                } catch (error) {
                    contentDiv.textContent = content;
                }
            } else {
                contentDiv.textContent = content;
            }

            messageDiv.appendChild(avatar);
            messageDiv.appendChild(contentDiv);

            this.elements.messagesContainer.appendChild(messageDiv);
            this.elements.messagesContainer.scrollTop = this.elements.messagesContainer.scrollHeight;

            if (sender === 'agent') {
                this.showTypingIndicator(false);
                setTimeout(() => {
                    if (this.currentExecution) {
                        this.currentExecution = null;
                        this.updateCurrentStatusToIdle();
                    }
                }, 1000);
            }
        }

        showError(message) {
            const errorDiv = document.createElement('div');
            errorDiv.className = 'error-message';
            errorDiv.textContent = message;

            document.body.appendChild(errorDiv);
            setTimeout(() => {
                if (errorDiv.parentNode) {
                    errorDiv.parentNode.removeChild(errorDiv);
                }
            }, 5000);
        }

        sendWebSocketMessage(data) {
            if (this.ws && this.ws.readyState === WebSocket.OPEN) {
                this.ws.send(JSON.stringify(data));
            } else {
                console.warn('WebSocket not connected, cannot send message');
            }
        }

    }

    // Initialize UI when DOM is ready: via the TB lifecycle hook when the
    // toolbox runtime is present, otherwise on DOMContentLoaded.
    const bootAgentUI = () => {
        window.agentUI = new AgentRegistryUI();
    };
    if (window.TB) {
        TB.once(bootAgentUI);
    } else {
        document.addEventListener('DOMContentLoaded', bootAgentUI);
    }
</script>
</body>
</html>"""

registry

client

RegistryClient

Manages the client-side connection to the Registry Server with robust reconnection and long-running support.

Source code in toolboxv2/mods/registry/client.py
  30
  31
  32
  33
  34
  35
  36
  37
  38
  39
  40
  41
  42
  43
  44
  45
  46
  47
  48
  49
  50
  51
  52
  53
  54
  55
  56
  57
  58
  59
  60
  61
  62
  63
  64
  65
  66
  67
  68
  69
  70
  71
  72
  73
  74
  75
  76
  77
  78
  79
  80
  81
  82
  83
  84
  85
  86
  87
  88
  89
  90
  91
  92
  93
  94
  95
  96
  97
  98
  99
 100
 101
 102
 103
 104
 105
 106
 107
 108
 109
 110
 111
 112
 113
 114
 115
 116
 117
 118
 119
 120
 121
 122
 123
 124
 125
 126
 127
 128
 129
 130
 131
 132
 133
 134
 135
 136
 137
 138
 139
 140
 141
 142
 143
 144
 145
 146
 147
 148
 149
 150
 151
 152
 153
 154
 155
 156
 157
 158
 159
 160
 161
 162
 163
 164
 165
 166
 167
 168
 169
 170
 171
 172
 173
 174
 175
 176
 177
 178
 179
 180
 181
 182
 183
 184
 185
 186
 187
 188
 189
 190
 191
 192
 193
 194
 195
 196
 197
 198
 199
 200
 201
 202
 203
 204
 205
 206
 207
 208
 209
 210
 211
 212
 213
 214
 215
 216
 217
 218
 219
 220
 221
 222
 223
 224
 225
 226
 227
 228
 229
 230
 231
 232
 233
 234
 235
 236
 237
 238
 239
 240
 241
 242
 243
 244
 245
 246
 247
 248
 249
 250
 251
 252
 253
 254
 255
 256
 257
 258
 259
 260
 261
 262
 263
 264
 265
 266
 267
 268
 269
 270
 271
 272
 273
 274
 275
 276
 277
 278
 279
 280
 281
 282
 283
 284
 285
 286
 287
 288
 289
 290
 291
 292
 293
 294
 295
 296
 297
 298
 299
 300
 301
 302
 303
 304
 305
 306
 307
 308
 309
 310
 311
 312
 313
 314
 315
 316
 317
 318
 319
 320
 321
 322
 323
 324
 325
 326
 327
 328
 329
 330
 331
 332
 333
 334
 335
 336
 337
 338
 339
 340
 341
 342
 343
 344
 345
 346
 347
 348
 349
 350
 351
 352
 353
 354
 355
 356
 357
 358
 359
 360
 361
 362
 363
 364
 365
 366
 367
 368
 369
 370
 371
 372
 373
 374
 375
 376
 377
 378
 379
 380
 381
 382
 383
 384
 385
 386
 387
 388
 389
 390
 391
 392
 393
 394
 395
 396
 397
 398
 399
 400
 401
 402
 403
 404
 405
 406
 407
 408
 409
 410
 411
 412
 413
 414
 415
 416
 417
 418
 419
 420
 421
 422
 423
 424
 425
 426
 427
 428
 429
 430
 431
 432
 433
 434
 435
 436
 437
 438
 439
 440
 441
 442
 443
 444
 445
 446
 447
 448
 449
 450
 451
 452
 453
 454
 455
 456
 457
 458
 459
 460
 461
 462
 463
 464
 465
 466
 467
 468
 469
 470
 471
 472
 473
 474
 475
 476
 477
 478
 479
 480
 481
 482
 483
 484
 485
 486
 487
 488
 489
 490
 491
 492
 493
 494
 495
 496
 497
 498
 499
 500
 501
 502
 503
 504
 505
 506
 507
 508
 509
 510
 511
 512
 513
 514
 515
 516
 517
 518
 519
 520
 521
 522
 523
 524
 525
 526
 527
 528
 529
 530
 531
 532
 533
 534
 535
 536
 537
 538
 539
 540
 541
 542
 543
 544
 545
 546
 547
 548
 549
 550
 551
 552
 553
 554
 555
 556
 557
 558
 559
 560
 561
 562
 563
 564
 565
 566
 567
 568
 569
 570
 571
 572
 573
 574
 575
 576
 577
 578
 579
 580
 581
 582
 583
 584
 585
 586
 587
 588
 589
 590
 591
 592
 593
 594
 595
 596
 597
 598
 599
 600
 601
 602
 603
 604
 605
 606
 607
 608
 609
 610
 611
 612
 613
 614
 615
 616
 617
 618
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
class RegistryClient:
    """Manages the client-side connection to the Registry Server with robust reconnection and long-running support."""

    def __init__(self, app: App):
        """Initialize client-side state; no network I/O happens until connect()."""
        self.app = app

        # WebSocket connection
        self.ws: ws_client.WebSocketClientProtocol | None = None
        self.server_url: str | None = None

        # Task management
        self.connection_task: asyncio.Task | None = None  # message listener task
        self.ping_task: asyncio.Task | None = None  # heartbeat loop task
        self.message_handler_tasks: set[asyncio.Task] = set()
        self.progress_processor_task: asyncio.Task | None = None

        # Connection state
        self.is_connected = False
        self.should_reconnect = True  # cleared to stop automatic reconnection
        self.reconnect_in_progress = False  # guards against concurrent reconnect attempts
        self.reconnect_attempts = 0
        self.max_reconnect_attempts = 10

        # Agent management
        self.local_agents: dict[str, Any] = {}  # agent_id -> local agent instance
        self.registered_info: dict[str, AgentRegistered] = {}  # agent_id -> registration record
        self.running_executions: dict[str, asyncio.Task] = {}  # request_id -> execution task
        self.persistent_callbacks: dict[str, Callable] = {}

        # Progress streaming (NO BATCHING - immediate streaming)
        self.progress_queues: dict[str, asyncio.Queue] = {}
        self.active_streams: set[str] = set()

        # Event handling
        self.custom_event_handlers: dict[str, Callable[[dict], Awaitable[None]]] = {}
        self.pending_registrations: dict[str, asyncio.Future] = {}
        self.registration_counter = 0

    # Utility Methods
    async def get_connection_status(self) -> dict[str, Any]:
        """Get detailed connection status information.

        Returns:
            A dict describing the WebSocket state, reconnect bookkeeping,
            background-task liveness, and counters for registered agents,
            running executions, and pending registrations. On unexpected
            errors a reduced dict carrying an ``"error"`` key is returned
            instead of raising.
        """
        try:
            connection_status = {
                "is_connected": self.is_connected,
                "server_url": self.server_url,
                "reconnect_attempts": self.reconnect_attempts,
                "max_reconnect_attempts": self.max_reconnect_attempts,
                "should_reconnect": self.should_reconnect,
                "reconnect_in_progress": self.reconnect_in_progress,
                "websocket_state": None,
                "websocket_open": False,
                "tasks": {
                    # FIX: coerce with bool() so a missing task reports False
                    # (the bare `and` chain yielded None when the task was None).
                    "connection_task_running": bool(self.connection_task and not self.connection_task.done()),
                    "ping_task_running": bool(self.ping_task and not self.ping_task.done()),
                },
                "registered_agents_count": len(self.local_agents),
                "running_executions_count": len(self.running_executions),
                "pending_registrations_count": len(self.pending_registrations),
                "persistent_callbacks_count": len(self.persistent_callbacks),
                # Optional attributes set elsewhere; default to None when absent.
                "last_ping_time": getattr(self, 'last_ping_time', None),
                "connection_uptime": None,
                "connection_established_at": getattr(self, 'connection_established_at', None),
            }

            # WebSocket specific status
            if self.ws:
                connection_status.update({
                    "websocket_state": str(self.ws.state.name) if hasattr(self.ws.state, 'name') else str(
                        self.ws.state),
                    "websocket_open": self.ws.open,
                    "websocket_closed": self.ws.closed,
                })

            # Calculate uptime from the (optional) connection timestamp.
            if hasattr(self, 'connection_established_at') and self.connection_established_at:
                connection_status[
                    "connection_uptime"] = asyncio.get_event_loop().time() - self.connection_established_at

            return connection_status

        except Exception as e:
            self.app.print(f"Error getting connection status: {e}")
            return {
                "error": str(e),
                "is_connected": False,
                "server_url": self.server_url,
            }

    async def get_registered_agents(self) -> dict[str, AgentRegistered]:
        """Return a snapshot describing every agent registered with this client.

        Each entry combines the stored registration record with live details
        about the local agent instance (availability, progress-callback
        support, optional capabilities). Returns an empty dict on unexpected
        errors instead of raising.
        """
        try:
            snapshot = {}

            for agent_id, reg_info in self.registered_info.items():
                # The local instance may be absent (registration-only entries).
                instance = self.local_agents.get(agent_id)

                info = {
                    "registration_info": reg_info,
                    "agent_available": instance is not None,
                    "agent_type": type(instance).__name__ if instance else "Unknown",
                    "has_progress_callback": hasattr(instance, 'progress_callback') if instance else False,
                    "supports_progress_callback": hasattr(instance,
                                                          'set_progress_callback') if instance else False,
                    "is_persistent_callback_active": agent_id in self.persistent_callbacks,
                    "registration_timestamp": getattr(reg_info, 'registration_timestamp', None),
                }

                # Capabilities are optional and may fail; record either outcome.
                if instance and hasattr(instance, 'get_capabilities'):
                    try:
                        info["capabilities"] = await instance.get_capabilities()
                    except Exception as e:
                        info["capabilities_error"] = str(e)

                snapshot[agent_id] = info

            return snapshot

        except Exception as e:
            self.app.print(f"Error getting registered agents: {e}")
            return {}

    async def get_running_executions(self) -> dict[str, dict[str, Any]]:
        """Describe every tracked execution task: state, timing, and outcome.

        Returns an empty dict on unexpected errors instead of raising.
        """
        try:
            report = {}

            for request_id, task in self.running_executions.items():
                entry = {
                    "request_id": request_id,
                    "task_done": task.done(),
                    "task_cancelled": task.cancelled(),
                    # start_time is an ad-hoc attribute stamped on the task at launch.
                    "start_time": getattr(task, 'start_time', None),
                    "running_time": None,
                    "task_exception": None,
                    "task_result": None,
                }

                if hasattr(task, 'start_time') and task.start_time:
                    entry["running_time"] = asyncio.get_event_loop().time() - task.start_time

                if task.done():
                    # exception()/result() can themselves raise (e.g. CancelledError).
                    try:
                        if task.exception():
                            entry["task_exception"] = str(task.exception())
                        else:
                            entry["task_result"] = "completed_successfully"
                    except Exception as e:
                        entry["task_status_error"] = str(e)

                report[request_id] = entry

            return report

        except Exception as e:
            self.app.print(f"Error getting running executions: {e}")
            return {}

    async def cancel_execution(self, request_id: str) -> bool:
        """Cancel a running execution.

        Cancels the tracked asyncio task, waits up to 5 seconds for it to
        unwind, then (best effort) notifies the server and removes the task
        from ``running_executions``.

        Args:
            request_id: Identifier of the execution to cancel.

        Returns:
            True if the execution was cancelled or had already finished;
            False if the request id is unknown or an unexpected error occurs.
        """
        try:
            if request_id not in self.running_executions:
                self.app.print(f"❌ Execution {request_id} not found")
                return False

            execution_task = self.running_executions[request_id]

            # Already-finished tasks count as a successful cancellation.
            if execution_task.done():
                self.app.print(f"⚠️  Execution {request_id} already completed")
                return True

            # Cancel the task
            execution_task.cancel()

            try:
                # Wait a moment for graceful cancellation
                await asyncio.wait_for(execution_task, timeout=5.0)
            except asyncio.CancelledError:
                self.app.print(f"✅ Execution {request_id} cancelled successfully")
            except asyncio.TimeoutError:
                self.app.print(f"⚠️  Execution {request_id} cancellation timeout - may still be running")
            except Exception as e:
                self.app.print(f"⚠️  Execution {request_id} cancellation resulted in exception: {e}")

            # Send cancellation notice to server (best effort; failures only logged)
            try:
                if self.is_connected and self.ws and self.ws.open:
                    cancellation_event = ProgressEvent(
                        event_type="execution_cancelled",
                        node_name="RegistryClient",
                        success=False,
                        metadata={
                            "request_id": request_id,
                            "cancellation_reason": "client_requested",
                            "timestamp": asyncio.get_event_loop().time()
                        }
                    )

                    cancellation_message = ExecutionResult(
                        request_id=request_id,
                        payload=cancellation_event.to_dict(),
                        is_final=True
                    )

                    await self._send_message('execution_result', cancellation_message.model_dump())

            except Exception as e:
                self.app.print(f"Failed to send cancellation notice to server: {e}")

            # Cleanup: drop the task from tracking regardless of notify outcome.
            self.running_executions.pop(request_id, None)

            return True

        except Exception as e:
            self.app.print(f"Error cancelling execution {request_id}: {e}")
            return False

    async def health_check(self) -> bool:
        """Perform a health check of the connection.

        Verifies the connected flags, then does a WebSocket ping round-trip
        (10s timeout) followed by a 'health_check' message carrying the
        client's current agent/execution state.

        Returns:
            True when the ping and the message send both succeed; False on
            any failure (this method never raises).
        """
        try:
            # Basic connection checks
            if not self.is_connected:
                self.app.print("🔍 Health check: Not connected")
                return False

            if not self.ws or not self.ws.open:
                self.app.print("🔍 Health check: WebSocket not open")
                return False

            # Ping test
            try:
                pong_waiter = await self.ws.ping()
                await asyncio.wait_for(pong_waiter, timeout=10.0)

                # Update last ping time (read back by get_connection_status)
                self.last_ping_time = asyncio.get_event_loop().time()

                # Test message sending
                test_message = WsMessage(
                    event='health_check',
                    data={
                        "timestamp": self.last_ping_time,
                        "client_id": getattr(self, 'client_id', 'unknown'),
                        "registered_agents": list(self.local_agents.keys()),
                        "running_executions": list(self.running_executions.keys())
                    }
                )

                await self.ws.send(test_message.model_dump_json())

                self.app.print("✅ Health check: Connection healthy")
                return True

            except asyncio.TimeoutError:
                self.app.print("❌ Health check: Ping timeout")
                return False
            except Exception as ping_error:
                self.app.print(f"❌ Health check: Ping failed - {ping_error}")
                return False

        except Exception as e:
            self.app.print(f"❌ Health check: Error - {e}")
            return False

    async def get_diagnostics(self) -> dict[str, Any]:
        """Collect a full diagnostic snapshot.

        Aggregates connection status, registered agents, running executions,
        a live health check, system info, and performance counters. On error,
        returns a dict with a ``"diagnostics_error"`` key instead of raising.
        """
        try:
            return {
                "connection_status": await self.get_connection_status(),
                "registered_agents": await self.get_registered_agents(),
                "running_executions": await self.get_running_executions(),
                "health_status": await self.health_check(),
                "system_info": {
                    "python_version": sys.version,
                    "asyncio_running": True,
                    "event_loop": str(asyncio.get_running_loop()),
                    "thread_name": threading.current_thread().name,
                },
                "performance_metrics": {
                    # Counters are optional attributes; default to 0 when absent.
                    "total_messages_sent": getattr(self, 'total_messages_sent', 0),
                    "total_messages_received": getattr(self, 'total_messages_received', 0),
                    "total_reconnections": self.reconnect_attempts,
                    "total_registrations": len(self.registered_info),
                    "memory_usage": self._get_memory_usage(),
                },
                "error_log": getattr(self, 'recent_errors', []),
            }

        except Exception as e:
            return {
                "diagnostics_error": str(e),
                "timestamp": asyncio.get_event_loop().time()
            }

    def _get_memory_usage(self) -> dict[str, Any]:
        """Report process memory stats via psutil; degrade gracefully without it."""
        try:
            import os

            import psutil
        except ImportError:
            # psutil is an optional dependency.
            return {"error": "psutil not available"}

        try:
            proc = psutil.Process(os.getpid())
            mem = proc.memory_info()
            return {
                "rss": mem.rss,
                "vms": mem.vms,
                "percent": proc.memory_percent(),
                "available": psutil.virtual_memory().available,
            }
        except Exception as e:
            return {"error": str(e)}

    async def cleanup_completed_executions(self):
        """Drop finished tasks from the running-executions registry.

        Returns:
            The number of tasks removed (0 on unexpected errors).
        """
        try:
            # Collect first, then remove, to avoid mutating while iterating.
            finished = [rid for rid, task in self.running_executions.items() if task.done()]

            for rid in finished:
                self.running_executions.pop(rid, None)
                self.app.print(f"🧹 Cleaned up completed execution: {rid}")

            return len(finished)

        except Exception as e:
            self.app.print(f"Error during cleanup: {e}")
            return 0

    async def connect(self, server_url: str, timeout: float = 30.0):
        """Connect and start all background tasks.

        Args:
            server_url: WebSocket URL of the Registry Server.
            timeout: Seconds to wait for the initial connection.

        Returns:
            True on success (or if already connected); False when the
            websockets library is missing, the connection times out, or the
            connect attempt fails.
        """
        if not ws_client:
            self.app.print("Websockets library not installed. Please run 'pip install websockets'")
            return False

        # Idempotent: an already-open connection is left untouched.
        if self.ws and self.ws.open:
            self.app.print("Already connected to the registry server.")
            return True

        self.server_url = server_url
        self.should_reconnect = True
        self.reconnect_in_progress = False

        try:
            self.app.print(f"Connecting to Registry Server at {server_url}...")
            self.ws = await asyncio.wait_for(
                ws_client.connect(server_url),
                timeout=timeout
            )

            self.is_connected = True
            self.reconnect_attempts = 0  # a fresh connection resets the backoff counter

            # Start all background tasks
            await self._start_all_background_tasks()

            self.app.print(f"✅ Successfully connected and started all tasks")
            return True

        except asyncio.TimeoutError:
            self.app.print(f"❌ Connection timeout after {timeout}s")
            return False
        except Exception as e:
            self.app.print(f"❌ Connection failed: {e}")
            return False

    async def _start_all_background_tasks(self):
        """Spawn the listener and heartbeat tasks that keep the connection alive."""
        self.connection_task = asyncio.create_task(self._listen())
        self.ping_task = asyncio.create_task(self._ping_loop())
        self.app.print("🚀 All background tasks started")
    async def _start_ping_task(self):
        """Launch the heartbeat task unless one is already active."""
        already_running = self.ping_task and not self.ping_task.done()
        if already_running:
            return

        self.ping_task = asyncio.create_task(self._ping_loop())

    async def _ping_loop(self):
        """Dedicated ping task that never blocks and has highest priority.

        Sends a WebSocket ping every 20 seconds; two consecutive failures
        (timeout or error) are treated as a dead connection and break the
        loop. On exit, a reconnect is scheduled if the client is still
        supposed to be connected.
        """
        ping_interval = 20  # Less aggressive than server's 5s interval
        consecutive_failures = 0
        max_failures = 2

        while self.is_connected and self.should_reconnect:
            try:
                await asyncio.sleep(ping_interval)

                # Double-check connection state
                if not self.ws or not self.ws.open or self.ws.closed:
                    self.app.print("Ping task detected closed connection")
                    break

                try:
                    # Send ping with short timeout
                    pong_waiter = await self.ws.ping()
                    await asyncio.wait_for(pong_waiter, timeout=8.0)  # Less than server's 10s timeout

                    # Any successful pong resets the failure streak.
                    consecutive_failures = 0
                    self.app.print("📡 Heartbeat successful")

                except asyncio.TimeoutError:
                    consecutive_failures += 1
                    self.app.print(f"⚠️ Ping timeout ({consecutive_failures}/{max_failures})")

                    if consecutive_failures >= max_failures:
                        self.app.print("❌ Multiple ping timeouts - connection dead")
                        break

                except Exception as ping_error:
                    consecutive_failures += 1
                    self.app.print(f"❌ Ping error ({consecutive_failures}/{max_failures}): {ping_error}")

                    if consecutive_failures >= max_failures:
                        break

            except Exception as e:
                self.app.print(f"Ping loop error: {e}")
                break

        self.app.print("Ping task stopped")
        # Trigger reconnect if we should still be connected
        if self.should_reconnect and self.is_connected:
            asyncio.create_task(self._trigger_reconnect())

    async def _trigger_reconnect(self):
        """Trigger a reconnection attempt."""
        if self.reconnect_in_progress:
            return

        self.reconnect_in_progress = True
        self.is_connected = False

        try:
            if self.ws:
                with contextlib.suppress(Exception):
                    await self.ws.close()
                self.ws = None

            # Stop current tasks
            if self.connection_task and not self.connection_task.done():
                self.connection_task.cancel()
            if self.ping_task and not self.ping_task.done():
                self.ping_task.cancel()

            self.app.print("🔄 Attempting to reconnect...")
            await self._reconnect_with_backoff()

        finally:
            self.reconnect_in_progress = False

    async def _reconnect_with_backoff(self):
        """Reconnect with exponential backoff."""
        max_attempts = 10
        base_delay = 2
        max_delay = 300  # 5 minutes max

        for attempt in range(max_attempts):
            if not self.should_reconnect:
                break

            delay = min(base_delay * (2 ** attempt), max_delay)
            self.app.print(f"🔄 Reconnect attempt {attempt + 1}/{max_attempts} in {delay}s...")

            await asyncio.sleep(delay)

            try:
                if self.server_url:
                    self.ws = await ws_client.connect(self.server_url)
                    self.is_connected = True
                    self.reconnect_attempts = 0

                    # Restart tasks
                    self.connection_task = asyncio.create_task(self._listen())
                    await self._start_ping_task()

                    # Re-register agents
                    await self._reregister_agents()

                    self.app.print("✅ Reconnected successfully!")
                    return

            except Exception as e:
                self.app.print(f"❌ Reconnect attempt {attempt + 1} failed: {e}")

        self.app.print("❌ All reconnection attempts failed")
        self.should_reconnect = False

    async def _reregister_agents(self):
        """Re-register all local agents after reconnection.

        Iterates over a snapshot of the registration table, re-sends a
        register request per agent, and on success rekeys both
        `local_agents` and `registered_info` to the server-assigned new ID.
        Failures are logged per agent and do not stop the loop.
        """
        if not self.registered_info:
            self.app.print("No agents to re-register")
            return

        self.app.print(f"Re-registering {len(self.registered_info)} agents...")

        # list(...) snapshot: the dict is mutated (rekeyed) inside the loop.
        for agent_id, reg_info in list(self.registered_info.items()):
            try:
                agent_instance = self.local_agents.get(agent_id)
                if not agent_instance:
                    continue

                # Create new registration (server will assign new IDs)
                # NOTE(review): the description is looked up in local_agents
                # under the key f"{agent_id}_description" - confirm descriptions
                # are really stored in that dict; otherwise the fallback string
                # is always used.
                new_reg_info = await self.register(
                    agent_instance,
                    reg_info.public_name,
                    self.local_agents.get(f"{agent_id}_description", "Re-registered agent")
                )

                if new_reg_info:
                    # Update stored information
                    old_agent_id = agent_id
                    new_agent_id = new_reg_info.public_agent_id

                    # Move agent to new ID
                    self.local_agents[new_agent_id] = self.local_agents.pop(old_agent_id)
                    self.registered_info[new_agent_id] = self.registered_info.pop(old_agent_id)

                    self.app.print(f"✅ Re-registered agent: {reg_info.public_name} (new ID: {new_agent_id})")
                else:
                    self.app.print(f"❌ Failed to re-register agent: {reg_info.public_name}")

            except Exception as e:
                self.app.print(f"Error re-registering agent {reg_info.public_name}: {e}")

        self.app.print("Agent re-registration completed")

    async def _create_persistent_progress_callback(self, request_id: str, agent_id: str):
        """Create progress callback with offline queuing capability.

        Returns an async callback that buffers every ProgressEvent in a
        bounded queue (dropping the oldest entry when full) and attempts an
        immediate send while the connection is healthy. The queue is stored
        in `self.progress_queues[request_id]` for later draining.
        """
        progress_queue = asyncio.Queue(maxsize=100)  # Buffer for offline messages

        async def persistent_progress_callback(event: ProgressEvent):
            try:
                # Add to queue first
                try:
                    progress_queue.put_nowait((event, asyncio.get_event_loop().time()))
                except asyncio.QueueFull:
                    # Remove oldest item and add new one
                    try:
                        progress_queue.get_nowait()
                        progress_queue.put_nowait((event, asyncio.get_event_loop().time()))
                    except asyncio.QueueEmpty:
                        pass

                # Try to send immediately if connected
                if await self._check_connection_health():
                    try:
                        result = ExecutionResult(
                            request_id=request_id,
                            payload=event.to_dict(),
                            is_final=False
                        )
                        success = await self._send_message('execution_result', result.model_dump())
                        if success:
                            # Remove from queue since it was sent successfully
                            # NOTE(review): get_nowait() dequeues the OLDEST entry,
                            # which is this event only when the queue was empty
                            # beforehand - otherwise an older buffered event is
                            # dropped instead. Confirm this is intended.
                            try:
                                progress_queue.get_nowait()
                            except asyncio.QueueEmpty:
                                pass
                            return
                    except Exception as e:
                        self.app.print(f"Progress send failed, queued: {e}")

                # If we get here, message is queued for later sending

            except Exception as e:
                self.app.print(f"Progress callback error: {e}")

        # Store queue for later processing
        self.progress_queues[request_id] = progress_queue
        return persistent_progress_callback
    async def _store_progress_callback_state(self, agent_id: str, callback_func):
        """Store progress callback for reconnection scenarios."""
        self.persistent_callbacks[agent_id] = callback_func

    async def _restore_progress_callbacks(self):
        """Restore progress callbacks after reconnection."""
        for agent_id, callback_func in self.persistent_callbacks.items():
            agent = self.local_agents.get(agent_id)
            if agent and hasattr(agent, 'set_progress_callback'):
                agent.set_progress_callback(callback_func)

    def on(self, event_name: str, handler: Callable[[dict], Awaitable[None]]):
        """Subscribe *handler* to a named custom event pushed by the server.

        Any previously registered handler for the same event name is replaced.
        """
        self.app.print(f"Handler for custom event '{event_name}' registered.")
        self.custom_event_handlers.update({event_name: handler})

    async def send_custom_event(self, event_name: str, data: dict[str, Any]):
        """Send a custom event with a JSON payload to the server.

        Silently returns when the client is not connected. A send failure
        is logged and triggers the connection-error cleanup.
        """
        connected = self.is_connected and self.ws and self.ws.open
        if not connected:
            self.app.print("Cannot send custom event: Not connected.")
            return

        try:
            outgoing = WsMessage(event=event_name, data=data)
            await self.ws.send(outgoing.model_dump_json())
            self.app.print(f"Sent custom event '{event_name}' to server.")
        except Exception as e:
            self.app.print(f"Failed to send custom event: {e}")
            await self._handle_connection_error()

    async def _listen(self):
        """Robust message listening loop with immediate connection loss detection.

        Receives frames with a 5-second timeout so connection health can be
        re-verified between messages. `agent_registered` confirmations are
        handled inline (register() is awaiting them); all other events are
        dispatched to background tasks so a slow handler cannot stall the
        socket. When the loop exits for any reason, a reconnect is scheduled
        if the client is still supposed to be connected.
        """
        self.app.print("Registry client is now listening for incoming requests...")

        try:
            while self.is_connected and self.ws and self.ws.open:
                try:
                    # Check connection state before each recv attempt
                    if self.ws.closed:
                        self.app.print("WebSocket is closed - triggering reconnect")
                        break

                    message_raw = await asyncio.wait_for(self.ws.recv(), timeout=5.0)

                    # Handle different message types immediately
                    if isinstance(message_raw, bytes):
                        # Binary frames carry no application payload here - skipped.
                        continue

                    # Process text messages
                    try:
                        message = WsMessage.model_validate_json(message_raw)
                        # Handle critical messages immediately, others in background
                        if message.event in ['agent_registered']:
                            await self._handle_message(message)
                        else:
                            # Handle non-critical messages in background to avoid blocking
                            task = asyncio.create_task(self._handle_message(message))
                            self.message_handler_tasks.add(task)
                            # Clean completed tasks
                            self.message_handler_tasks = {t for t in self.message_handler_tasks if not t.done()}

                    except Exception as e:
                        self.app.print(f"Error processing message: {e} | Raw: {message_raw[:200]}")

                except asyncio.TimeoutError:
                    # Normal timeout - check connection health
                    if not self.ws or not self.ws.open or self.ws.closed:
                        self.app.print("Connection health check failed during timeout")
                        break
                    continue

                except ConnectionClosed as e:
                    self.app.print(f"Connection closed by server: {e}")
                    break

                except Exception as e:
                    # Any other WebSocket error means connection is likely dead.
                    # Matching on the type name covers errors from the websockets
                    # library without importing its exception classes here.
                    if "ConnectionClosedError" in str(type(e)) or "IncompleteReadError" in str(type(e)):
                        self.app.print(f"Connection lost: {e}")
                        break
                    else:
                        self.app.print(f"Unexpected error in listen loop: {e}")
                        # Don't break on unexpected errors, but log them
                        await asyncio.sleep(0.1)

        except Exception as e:
            self.app.print(f"Fatal error in listen loop: {e}")
        finally:
            # Always trigger reconnection attempt
            if self.should_reconnect:
                asyncio.create_task(self._trigger_reconnect())

    async def _handle_message(self, message: WsMessage):
        """Dispatch a single incoming WebSocket message.

        Registration confirmations resolve the oldest pending registration
        future; run requests and custom events are handed off to background
        tasks so this coroutine never blocks the listen loop. Handler errors
        are logged and swallowed so one bad message cannot kill the
        connection.
        """
        try:
            if message.event == 'agent_registered':
                reg_info = AgentRegistered.model_validate(message.data)
                # Resolve the first registration future that is still pending.
                pending_id = next(
                    (rid for rid, fut in self.pending_registrations.items() if not fut.done()),
                    None,
                )
                if pending_id is not None:
                    pending_future = self.pending_registrations[pending_id]
                    if not pending_future.done():
                        pending_future.set_result(reg_info)
                else:
                    self.app.print("Received agent_registered but no pending registration found")

            elif message.event == 'run_request':
                # Never await the run here - it may take a very long time.
                asyncio.create_task(
                    self._handle_run_request(RunRequest.model_validate(message.data))
                )

            elif message.event in self.custom_event_handlers:
                self.app.print(f"Received custom event '{message.event}' from server.")
                asyncio.create_task(self.custom_event_handlers[message.event](message.data))

            else:
                self.app.print(f"Received unhandled event from server: '{message.event}'")

        except Exception as e:
            self.app.print(f"Error handling message: {e}")

    async def register(self, agent_instance: Any, public_name: str, description: str | None = None) -> AgentRegistered | None:
        """Register an agent with the server.

        Sends a `register` request and waits up to 30 seconds for the
        server's confirmation, which carries the agent's public ID, URL and
        API key.

        Args:
            agent_instance: The local agent object that will serve requests.
            public_name: Human-readable name to publish the agent under.
            description: Optional description shown to consumers.

        Returns:
            The AgentRegistered confirmation on success, otherwise None.
        """
        if not self.is_connected or not self.ws:
            self.app.print("Not connected. Cannot register agent.")
            return None

        # Initialize before the try block: the finally-cleanup referenced
        # reg_id unconditionally, so an exception raised before reg_id was
        # assigned (e.g. during model construction) caused a NameError that
        # masked the original error.
        reg_id = None
        try:
            # Create registration request
            registration = AgentRegistration(public_name=public_name, description=description)
            message = WsMessage(event='register', data=registration.model_dump())

            # Create future for registration response
            reg_id = f"reg_{self.registration_counter}"
            self.registration_counter += 1
            self.pending_registrations[reg_id] = asyncio.Future()

            # Send registration request
            await self.ws.send(message.model_dump_json())
            self.app.print(f"Sent registration request for agent '{public_name}'")

            # Wait for registration confirmation
            try:
                reg_info = await asyncio.wait_for(self.pending_registrations[reg_id], timeout=30.0)

                # Store agent and registration info
                self.local_agents[reg_info.public_agent_id] = agent_instance
                self.registered_info[reg_info.public_agent_id] = reg_info

                self.app.print(f"Agent '{public_name}' registered successfully.")
                self.app.print(f"  Public URL: {reg_info.public_url}")
                self.app.print(f"  API Key: {reg_info.public_api_key}")

                return reg_info

            except TimeoutError:
                self.app.print("Timeout waiting for registration confirmation.")
                return None

        except Exception as e:
            self.app.print(f"Error during registration: {e}")
            return None
        finally:
            # Cleanup pending registration (only if one was created).
            if reg_id is not None:
                self.pending_registrations.pop(reg_id, None)

    async def _handle_run_request(self, run_request: RunRequest):
        """Handle run request - start agent in completely separate task."""
        agent_id = run_request.public_agent_id
        agent = self.local_agents.get(agent_id)

        if not agent:
            await self._stream_error(run_request.request_id, f"Agent with ID {agent_id} not found")
            return

        # Start agent execution in separate task - NEVER await here
        execution_task = asyncio.create_task(
            self._execute_agent_with_monitoring(agent, run_request)
        )

        # Store task but don't wait for it
        self.running_executions[run_request.request_id] = execution_task

        self.app.print(f"🚀 Agent execution started in background: {run_request.request_id}")
        # This method returns immediately - agent runs in background
    async def _execute_agent_with_monitoring(self, agent: Any, run_request: RunRequest):
        """Execute agent in completely separate task - never blocks main connection."""
        request_id = run_request.request_id
        agent_id = run_request.public_agent_id

        try:
            # Create progress streaming callback
            progress_callback = await self._create_streaming_progress_callback(request_id, agent_id)

            # Store original callback
            original_callback = getattr(agent, 'progress_callback', None)

            # Set streaming progress callback
            if hasattr(agent, 'set_progress_callback'):
                agent.set_progress_callback(progress_callback)
            elif hasattr(agent, 'progress_callback'):
                agent.progress_callback = progress_callback

            # Store for reconnection scenarios
            self.persistent_callbacks[agent_id] = progress_callback
            self.active_streams.add(request_id)

            self.app.print(f"🚀 Starting agent execution in separate task: {request_id}")

            # EXECUTE THE AGENT - this can run for hours/days
            final_result = await agent.a_run(
                query=run_request.query,
                session_id=run_request.session_id,
                **run_request.kwargs
            )

            # Send final result
            await self._stream_final_result(request_id, final_result, agent_id, run_request.session_id)

            self.app.print(f"✅ Agent execution completed: {request_id}")

        except Exception as e:
            self.app.print(f"❌ Agent execution failed: {e}")
            await self._stream_error(request_id, str(e))
            import traceback
            traceback.print_exc()

        finally:
            # Cleanup
            await self.running_executions.pop(request_id, None)
            self.persistent_callbacks.pop(agent_id, None)
            self.active_streams.discard(request_id)

            # Close progress queue
            if request_id in self.progress_queues:
                queue = self.progress_queues.pop(request_id)
                # Signal queue processor to stop for this request
                try:
                    await queue.put(None)  # Sentinel value
                except:
                    pass

            # Restore original callback
            try:
                if hasattr(agent, 'set_progress_callback'):
                    agent.set_progress_callback(original_callback)
                elif hasattr(agent, 'progress_callback'):
                    agent.progress_callback = original_callback
            except Exception as cleanup_error:
                self.app.print(f"Warning: Callback cleanup failed: {cleanup_error}")

    async def _stream_final_result(self, request_id: str, final_result: Any, agent_id: str, session_id: str):
        """Stream the final execution result to the server.

        Retries up to 10 times with linearly increasing delays; the final
        result is the most important message, hence the generous budget.
        """
        completion_event = ProgressEvent(
            event_type="execution_complete",
            node_name="RegistryClient",
            success=True,
            metadata={
                "result": final_result,
                "agent_id": agent_id,
                "session_id": session_id
            }
        )

        outgoing = ExecutionResult(
            request_id=request_id,
            payload=completion_event.to_dict(),
            is_final=True
        )

        max_attempts = 10
        for attempt in range(max_attempts):
            try:
                healthy = await self._check_connection_health()
                if healthy and await self._send_message('execution_result', outgoing.model_dump()):
                    self.app.print("✅ Final result streamed successfully")
                    return

                # Not delivered yet - back off a bit longer each round.
                await asyncio.sleep(1.0 * (attempt + 1))

            except Exception as e:
                self.app.print(f"Final result stream attempt {attempt + 1} failed: {e}")

        self.app.print(f"❌ Failed to stream final result after {max_attempts} attempts")

    async def _stream_error(self, request_id: str, error_message: str):
        """Stream an execution error to the server, retrying up to 5 times."""
        error_payload = ExecutionError(request_id=request_id, error=error_message)

        for attempt in range(5):
            try:
                healthy = await self._check_connection_health()
                if healthy and await self._send_message('execution_error', error_payload.model_dump()):
                    return
                # Not delivered - short, growing pause before the next try.
                await asyncio.sleep(0.5 * (attempt + 1))
            except Exception as e:
                self.app.print(f"Error stream attempt {attempt + 1} failed: {e}")

    async def _create_streaming_progress_callback(self, request_id: str, agent_id: str):
        """Create callback that streams progress immediately as it comes.

        Sets up an unbounded per-request queue plus a dedicated processor
        task that forwards each event to the server, and returns the async
        callback that feeds the queue.
        """
        # Create queue for this specific request
        progress_queue = asyncio.Queue()
        self.progress_queues[request_id] = progress_queue

        # Start dedicated processor for this request
        # NOTE(review): the task handle lives only in this local variable; it
        # is never stored or awaited, so it cannot be cancelled later and may
        # be garbage-collected while running - consider keeping a reference
        # (e.g. alongside running_executions).
        processor_task = asyncio.create_task(
            self._process_progress_stream(request_id, progress_queue)
        )

        async def streaming_progress_callback(event: ProgressEvent):
            """Stream progress immediately - no batching, no delays."""
            try:
                # Only forward while the request is still an active stream.
                if request_id in self.active_streams:
                    # Put in queue for immediate processing
                    await progress_queue.put(event)
            except Exception as e:
                self.app.print(f"Progress streaming error: {e}")

        return streaming_progress_callback

    async def _process_progress_stream(self, request_id: str, progress_queue: asyncio.Queue):
        """Process progress stream in real-time - separate task per request."""
        self.app.print(f"📡 Started progress streaming for request: {request_id}")

        while request_id in self.active_streams:
            try:
                # Get next progress event (blocking)
                event = await progress_queue.get()

                # Sentinel value to stop
                if event is None:
                    break

                # Stream immediately - no batching
                await self._stream_progress_immediately(request_id, event)

            except Exception as e:
                self.app.print(f"Progress stream processing error: {e}")
                await asyncio.sleep(0.1)  # Brief pause on error

        self.app.print(f"📡 Stopped progress streaming for request: {request_id}")

    async def _stream_progress_immediately(self, request_id: str, event: ProgressEvent):
        """Push a single progress event to the server right away.

        Makes up to three attempts with short increasing pauses. Never
        raises: a total failure is only logged, so the stream processor
        keeps running.
        """
        max_attempts = 3

        for attempt in range(max_attempts):
            pause = 0.2 * (attempt + 1)
            try:
                if await self._check_connection_health():
                    wire_message = ExecutionResult(
                        request_id=request_id,
                        payload=event.to_dict(),
                        is_final=False
                    )
                    if await self._send_message('execution_result', wire_message.model_dump()):
                        return  # delivered

                # Connection unhealthy or send refused - brief backoff.
                await asyncio.sleep(pause)

            except Exception as e:
                self.app.print(f"Stream attempt {attempt + 1} failed: {e}")
                if attempt < max_attempts - 1:
                    await asyncio.sleep(pause)

        self.app.print(f"⚠️ Failed to stream progress after {max_attempts} attempts")


    async def send_ui_progress(self, progress_data: dict[str, Any], retry_count: int = 3):
        """Enhanced UI progress sender with retry logic.

        Packs *progress_data* into the `ui_progress_update` wire format and
        sends it, retrying with linear backoff. Returns True on success and
        False when not connected or when all retries failed.

        Args:
            progress_data: Loosely structured progress payload; missing keys
                fall back to defaults, and panel data is read from the nested
                progress_data['progress_data'] dict.
            retry_count: Maximum number of send attempts.
        """
        if not self.is_connected or not self.ws or not self.ws.open:
            self.app.print("Registry client WebSocket not connected - queuing progress update")
            # Could implement a queue here for offline progress updates
            return False

        for attempt in range(retry_count):
            try:
                # Structure progress message for registry server
                ui_message = {
                    # Event-loop time is monotonic, not wall-clock - used only
                    # as a relative timestamp / uniqueness source.
                    "timestamp": progress_data.get('timestamp', asyncio.get_event_loop().time()),
                    "agent_id": progress_data.get('agent_id', 'unknown'),
                    "event_type": progress_data.get('event_type', 'unknown'),
                    "status": progress_data.get('status', 'processing'),
                    "agent_name": progress_data.get('agent_name', 'Unknown'),
                    "node_name": progress_data.get('node_name', 'Unknown'),
                    "session_id": progress_data.get('session_id'),
                    "metadata": progress_data.get('metadata', {}),

                    # Enhanced progress data for UI panels
                    "outline_progress": progress_data.get('progress_data', {}).get('outline', {}),
                    "activity_info": progress_data.get('progress_data', {}).get('activity', {}),
                    "meta_tool_info": progress_data.get('progress_data', {}).get('meta_tool', {}),
                    "system_status": progress_data.get('progress_data', {}).get('system', {}),
                    "graph_info": progress_data.get('progress_data', {}).get('graph', {}),

                    # UI flags for selective updates
                    "ui_flags": progress_data.get('ui_flags', {}),

                    # Performance metrics
                    "performance": progress_data.get('performance', {}),

                    # Message metadata
                    "message_id": f"msg_{asyncio.get_event_loop().time()}_{attempt}",
                    "retry_count": attempt
                }

                # Send as WsMessage
                message = WsMessage(event='ui_progress_update', data=ui_message)
                await self.ws.send(message.model_dump_json())

                # Success - break retry loop
                self.app.print(
                    f"📤 Sent UI progress: {progress_data.get('event_type')} | {progress_data.get('status')} (attempt {attempt + 1})")
                return True

            except Exception as e:
                self.app.print(f"Failed to send UI progress (attempt {attempt + 1}/{retry_count}): {e}")
                if attempt < retry_count - 1:
                    await asyncio.sleep(0.5 * (attempt + 1))  # Exponential backoff
                else:
                    # Out of retries: assume the connection itself is broken.
                    await self._handle_connection_error()
                    return False

        return False

    async def send_agent_status(self, agent_id: str, status: str, details: dict[str, Any] | None = None):
        """Send agent status updates.

        Best-effort: silently returns when not connected; a send error is
        logged and triggers the connection-error cleanup.

        Args:
            agent_id: Public ID of the agent whose status changed.
            status: Status string (free-form, consumed by the server UI).
            details: Optional extra payload merged into the status message.
        """
        if not self.is_connected or not self.ws or not self.ws.open:
            return

        try:
            status_message = {
                "agent_id": agent_id,
                "status": status,
                "details": details or {},
                # Event-loop time: monotonic, relative - not wall-clock.
                "timestamp": asyncio.get_event_loop().time(),
                "capabilities": ["chat", "progress_tracking", "outline_visualization", "meta_tool_monitoring"]
            }

            message = WsMessage(event='agent_status_update', data=status_message)
            await self.ws.send(message.model_dump_json())

        except Exception as e:
            self.app.print(f"Failed to send agent status: {e}")
            await self._handle_connection_error()

    async def _send_error(self, request_id: str, error_message: str):
        """Send a single execution_error message to the server (no retries)."""
        payload = ExecutionError(request_id=request_id, error=error_message)
        await self._send_message('execution_error', payload.model_dump())

    async def _check_connection_health(self) -> bool:
        """Check if the WebSocket connection is actually healthy."""
        if not self.ws:
            return False

        try:
            # Check basic connection state
            if self.ws.closed or not self.ws.open:
                return False

            # Try a quick ping to verify connectivity
            pong_waiter = await self.ws.ping()
            await asyncio.wait_for(pong_waiter, timeout=3.0)
            return True

        except Exception as e:
            self.app.print(f"Connection health check failed: {e}")
            return False

    async def _send_message(self, event: str, data: dict, max_retries: int = 3):
        """Send one message, verifying connection health before each try.

        Returns True on success, False when retries are exhausted or a
        connection-level failure is detected (which also schedules a
        reconnect). Never raises.
        """
        for attempt in range(max_retries):
            is_last = attempt >= max_retries - 1

            # Verify the link is alive before spending a send on it.
            if not await self._check_connection_health():
                self.app.print(f"Connection unhealthy for message '{event}' (attempt {attempt + 1})")
                if is_last:
                    self.app.print(f"Cannot send message '{event}': Connection permanently failed")
                    asyncio.create_task(self._trigger_reconnect())
                    return False
                await asyncio.sleep(0.5 * (attempt + 1))
                continue

            try:
                await self.ws.send(WsMessage(event=event, data=data).model_dump_json())
                return True
            except Exception as e:
                self.app.print(f"Send attempt {attempt + 1} failed for '{event}': {e}")

                # Connection-flavoured errors are terminal: hand off to reconnect.
                lowered = str(e).lower()
                if any(marker in lowered for marker in ('connectionclosed', 'incomplete', 'connection', 'closed')):
                    self.app.print("Connection error detected - triggering reconnect")
                    asyncio.create_task(self._trigger_reconnect())
                    return False

                if not is_last:
                    await asyncio.sleep(0.5 * (attempt + 1))

        return False
    async def _send_final_result_with_retry(self, request_id: str, final_result: Any, agent_id: str, session_id: str):
        """Send the final result, retrying until `_send_message` reports success.

        `_send_message` swallows send errors and returns a boolean instead of
        raising, so its return value must be checked - the previous version
        ignored it and logged success even when every send had failed.
        """
        final_event = ProgressEvent(
            event_type="execution_complete",
            node_name="RegistryClient",
            success=True,
            metadata={
                "result": final_result,
                "agent_id": agent_id,
                "session_id": session_id
            }
        )

        final_message = ExecutionResult(
            request_id=request_id,
            payload=final_event.to_dict(),
            is_final=True
        )

        max_retries = 10
        base_delay = 2

        for attempt in range(max_retries):
            try:
                if not self.is_connected or not self.ws or not self.ws.open:
                    self.app.print(f"⚠️  Connection lost - waiting for reconnection (attempt {attempt + 1})")
                    await asyncio.sleep(base_delay * (attempt + 1))
                    continue

                # _send_message returns False on failure instead of raising.
                if await self._send_message('execution_result', final_message.model_dump()):
                    self.app.print(f"✅ Final result sent successfully on attempt {attempt + 1}")
                    return

                # Send reported failure without raising - back off and retry.
                self.app.print(f"❌ Failed to send final result (attempt {attempt + 1})")
                if attempt < max_retries - 1:
                    await asyncio.sleep(base_delay * (2 ** attempt))

            except Exception as e:
                self.app.print(f"❌ Failed to send final result (attempt {attempt + 1}): {e}")
                if attempt < max_retries - 1:
                    await asyncio.sleep(base_delay * (2 ** attempt))

        self.app.print(f"❌ Failed to send final result after {max_retries} attempts")

    async def _send_error_with_retry(self, request_id: str, error_message: str):
        """Send error message with retry logic."""
        max_retries = 5

        for attempt in range(max_retries):
            try:
                if self.is_connected and self.ws and self.ws.open:
                    await self._send_error(request_id, error_message)
                    return
                else:
                    await asyncio.sleep(2 * (attempt + 1))
            except Exception as e:
                self.app.print(f"Error sending error message (attempt {attempt + 1}): {e}")
                if attempt < max_retries - 1:
                    await asyncio.sleep(2 * (attempt + 1))

    async def _handle_connection_error(self):
        """Handle connection errors and cleanup."""
        self.is_connected = False
        if self.ws:
            with contextlib.suppress(builtins.BaseException):
                await self.ws.close()
            self.ws = None

    async def disconnect(self):
        """Enhanced disconnect with complete task cleanup.

        Shutdown order matters: flags are cleared first so loops and
        reconnect logic stop scheduling new work, then all background tasks
        are cancelled and awaited (with a timeout), the socket is closed,
        pending registration futures are cancelled, and finally the
        bookkeeping collections are cleared.
        """
        self.app.print("Initiating clean shutdown...")
        # Clear flags first so running loops exit and no reconnect is triggered.
        self.is_connected = False
        self.should_reconnect = False

        # Cancel all background tasks
        tasks_to_cancel = []

        if self.connection_task and not self.connection_task.done():
            tasks_to_cancel.append(self.connection_task)

        if self.ping_task and not self.ping_task.done():
            tasks_to_cancel.append(self.ping_task)

        # Cancel message handler tasks
        for task in list(self.message_handler_tasks):
            if not task.done():
                tasks_to_cancel.append(task)

        # Cancel running executions
        for task in list(self.running_executions.values()):
            if not task.done():
                tasks_to_cancel.append(task)

        if tasks_to_cancel:
            self.app.print(f"Cancelling {len(tasks_to_cancel)} background tasks...")
            for task in tasks_to_cancel:
                task.cancel()

            # Wait for cancellation with timeout
            try:
                await asyncio.wait_for(
                    asyncio.gather(*tasks_to_cancel, return_exceptions=True),
                    timeout=5.0
                )
            except asyncio.TimeoutError:
                self.app.print("Warning: Some tasks didn't cancel within timeout")

        # Close WebSocket connection
        if self.ws:
            with contextlib.suppress(Exception):
                await self.ws.close()
            self.ws = None

        # Cancel pending registrations
        for future in self.pending_registrations.values():
            if not future.done():
                future.cancel()
        self.pending_registrations.clear()

        # Clear state
        self.message_handler_tasks.clear()
        self.running_executions.clear()
        self.persistent_callbacks.clear()

        self.connection_task = None
        self.ping_task = None

        self.app.print("✅ Registry client shutdown completed")
cancel_execution(request_id) async

Cancel a running execution.

Source code in toolboxv2/mods/registry/client.py
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
async def cancel_execution(self, request_id: str) -> bool:
    """Cancel a running execution.

    Cancels the asyncio task tracked under ``request_id``, waits briefly for
    it to unwind, notifies the server with a final ``execution_cancelled``
    result, and removes the task from ``self.running_executions``.

    Args:
        request_id: Identifier of the execution to cancel.

    Returns:
        True if the execution was cancelled (or had already finished),
        False if it was unknown or an unexpected error occurred.
    """
    try:
        if request_id not in self.running_executions:
            self.app.print(f"❌ Execution {request_id} not found")
            return False

        execution_task = self.running_executions[request_id]

        # An already-finished task is treated as success: nothing to cancel.
        if execution_task.done():
            self.app.print(f"⚠️  Execution {request_id} already completed")
            return True

        # Cancel the task
        execution_task.cancel()

        try:
            # Wait a moment for graceful cancellation
            await asyncio.wait_for(execution_task, timeout=5.0)
        except asyncio.CancelledError:
            self.app.print(f"✅ Execution {request_id} cancelled successfully")
        except asyncio.TimeoutError:
            self.app.print(f"⚠️  Execution {request_id} cancellation timeout - may still be running")
        except Exception as e:
            self.app.print(f"⚠️  Execution {request_id} cancellation resulted in exception: {e}")

        # Send cancellation notice to server (best effort: failures are
        # logged but do not change the return value).
        try:
            if self.is_connected and self.ws and self.ws.open:
                cancellation_event = ProgressEvent(
                    event_type="execution_cancelled",
                    node_name="RegistryClient",
                    success=False,
                    metadata={
                        "request_id": request_id,
                        "cancellation_reason": "client_requested",
                        "timestamp": asyncio.get_event_loop().time()
                    }
                )

                # is_final=True tells the server no further results follow.
                cancellation_message = ExecutionResult(
                    request_id=request_id,
                    payload=cancellation_event.to_dict(),
                    is_final=True
                )

                await self._send_message('execution_result', cancellation_message.model_dump())

        except Exception as e:
            self.app.print(f"Failed to send cancellation notice to server: {e}")

        # Cleanup
        self.running_executions.pop(request_id, None)

        return True

    except Exception as e:
        self.app.print(f"Error cancelling execution {request_id}: {e}")
        return False
cleanup_completed_executions() async

Clean up completed execution tasks.

Source code in toolboxv2/mods/registry/client.py
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
async def cleanup_completed_executions(self):
    """Remove finished execution tasks from the tracking dict.

    Returns:
        The number of entries removed, or 0 if an unexpected error
        occurred while scanning.
    """
    try:
        # Snapshot the finished ids first so the dict is not mutated
        # while being iterated.
        finished_ids = [
            rid for rid, task in self.running_executions.items() if task.done()
        ]

        for rid in finished_ids:
            self.running_executions.pop(rid, None)
            self.app.print(f"🧹 Cleaned up completed execution: {rid}")

        return len(finished_ids)

    except Exception as exc:
        self.app.print(f"Error during cleanup: {exc}")
        return 0
connect(server_url, timeout=30.0) async

Connect and start all background tasks.

Source code in toolboxv2/mods/registry/client.py
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
async def connect(self, server_url: str, timeout: float = 30.0):
    """Connect to the registry server and start all background tasks.

    Args:
        server_url: WebSocket URL of the registry server.
        timeout: Maximum seconds to wait for the connection handshake.

    Returns:
        True if already connected or the connection succeeded;
        False on missing dependency, timeout, or connection error.
    """
    # Bail out early when the optional websockets dependency is missing.
    if not ws_client:
        self.app.print("Websockets library not installed. Please run 'pip install websockets'")
        return False

    # Idempotent: an already-open connection is reported as success.
    if self.ws and self.ws.open:
        self.app.print("Already connected to the registry server.")
        return True

    self.server_url = server_url
    self.should_reconnect = True
    self.reconnect_in_progress = False

    try:
        self.app.print(f"Connecting to Registry Server at {server_url}...")
        self.ws = await asyncio.wait_for(
            ws_client.connect(server_url),
            timeout=timeout
        )

        self.is_connected = True
        self.reconnect_attempts = 0  # a fresh connection resets the retry counter

        # Start all background tasks
        await self._start_all_background_tasks()

        self.app.print(f"✅ Successfully connected and started all tasks")
        return True

    except asyncio.TimeoutError:
        self.app.print(f"❌ Connection timeout after {timeout}s")
        return False
    except Exception as e:
        self.app.print(f"❌ Connection failed: {e}")
        return False
disconnect() async

Enhanced disconnect with complete task cleanup.

Source code in toolboxv2/mods/registry/client.py
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
async def disconnect(self):
    """Enhanced disconnect with complete task cleanup.

    Shutdown order: flip the connection flags first (so handlers stop
    reconnecting), cancel every background/execution task with a 5s grace
    period, close the WebSocket, cancel pending registration futures,
    then clear all tracking state.
    """
    self.app.print("Initiating clean shutdown...")
    self.is_connected = False
    self.should_reconnect = False  # prevents the reconnect loop from reviving the connection

    # Cancel all background tasks
    tasks_to_cancel = []

    if self.connection_task and not self.connection_task.done():
        tasks_to_cancel.append(self.connection_task)

    if self.ping_task and not self.ping_task.done():
        tasks_to_cancel.append(self.ping_task)

    # Cancel message handler tasks
    for task in list(self.message_handler_tasks):
        if not task.done():
            tasks_to_cancel.append(task)

    # Cancel running executions
    for task in list(self.running_executions.values()):
        if not task.done():
            tasks_to_cancel.append(task)

    if tasks_to_cancel:
        self.app.print(f"Cancelling {len(tasks_to_cancel)} background tasks...")
        for task in tasks_to_cancel:
            task.cancel()

        # Wait for cancellation with timeout; return_exceptions=True keeps
        # one failing task from aborting the gather.
        try:
            await asyncio.wait_for(
                asyncio.gather(*tasks_to_cancel, return_exceptions=True),
                timeout=5.0
            )
        except asyncio.TimeoutError:
            self.app.print("Warning: Some tasks didn't cancel within timeout")

    # Close WebSocket connection (errors during close are irrelevant here)
    if self.ws:
        with contextlib.suppress(Exception):
            await self.ws.close()
        self.ws = None

    # Cancel pending registrations
    for future in self.pending_registrations.values():
        if not future.done():
            future.cancel()
    self.pending_registrations.clear()

    # Clear state
    self.message_handler_tasks.clear()
    self.running_executions.clear()
    self.persistent_callbacks.clear()

    self.connection_task = None
    self.ping_task = None

    self.app.print("✅ Registry client shutdown completed")
get_connection_status() async

Get detailed connection status information.

Source code in toolboxv2/mods/registry/client.py
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
async def get_connection_status(self) -> dict[str, Any]:
    """Get detailed connection status information.

    Builds a snapshot dict of connection flags, background-task state,
    bookkeeping counters, and (when available) WebSocket state and
    connection uptime.

    Returns:
        The status dict; on unexpected failure, a minimal dict with an
        ``"error"`` key instead of raising.
    """
    try:
        connection_status = {
            "is_connected": self.is_connected,
            "server_url": self.server_url,
            "reconnect_attempts": self.reconnect_attempts,
            "max_reconnect_attempts": self.max_reconnect_attempts,
            "should_reconnect": self.should_reconnect,
            "reconnect_in_progress": self.reconnect_in_progress,
            "websocket_state": None,
            "websocket_open": False,
            "tasks": {
                # bool(...) so a missing (None) task reports False instead
                # of leaking None into the status dict.
                "connection_task_running": bool(self.connection_task and not self.connection_task.done()),
                "ping_task_running": bool(self.ping_task and not self.ping_task.done()),
            },
            "registered_agents_count": len(self.local_agents),
            "running_executions_count": len(self.running_executions),
            "pending_registrations_count": len(self.pending_registrations),
            "persistent_callbacks_count": len(self.persistent_callbacks),
            "last_ping_time": getattr(self, 'last_ping_time', None),
            "connection_uptime": None,
            "connection_established_at": getattr(self, 'connection_established_at', None),
        }

        # WebSocket specific status
        if self.ws:
            connection_status.update({
                "websocket_state": str(self.ws.state.name) if hasattr(self.ws.state, 'name') else str(
                    self.ws.state),
                "websocket_open": self.ws.open,
                "websocket_closed": self.ws.closed,
            })

        # Calculate uptime
        if hasattr(self, 'connection_established_at') and self.connection_established_at:
            connection_status[
                "connection_uptime"] = asyncio.get_event_loop().time() - self.connection_established_at

        return connection_status

    except Exception as e:
        self.app.print(f"Error getting connection status: {e}")
        return {
            "error": str(e),
            "is_connected": False,
            "server_url": self.server_url,
        }
get_diagnostics() async

Get comprehensive diagnostic information.

Source code in toolboxv2/mods/registry/client.py
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
async def get_diagnostics(self) -> dict[str, Any]:
    """Get comprehensive diagnostic information.

    Aggregates connection status, agent/execution listings, a live health
    check, interpreter/system info, and simple performance counters into
    one dict. On unexpected failure returns a dict with a
    ``"diagnostics_error"`` key instead of raising.
    """
    try:
        diagnostics = {
            "connection_status": await self.get_connection_status(),
            "registered_agents": await self.get_registered_agents(),
            "running_executions": await self.get_running_executions(),
            "health_status": await self.health_check(),
            "system_info": {
                "python_version": sys.version,
                "asyncio_running": True,
                "event_loop": str(asyncio.get_running_loop()),
                "thread_name": threading.current_thread().name,
            },
            "performance_metrics": {
                # Counters default to 0 when tracking attributes were never set.
                "total_messages_sent": getattr(self, 'total_messages_sent', 0),
                "total_messages_received": getattr(self, 'total_messages_received', 0),
                "total_reconnections": self.reconnect_attempts,
                "total_registrations": len(self.registered_info),
                "memory_usage": self._get_memory_usage(),
            },
            "error_log": getattr(self, 'recent_errors', []),
        }

        return diagnostics

    except Exception as e:
        return {
            "diagnostics_error": str(e),
            "timestamp": asyncio.get_event_loop().time()
        }
get_registered_agents() async

Get all registered agents information.

Source code in toolboxv2/mods/registry/client.py
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
async def get_registered_agents(self) -> dict[str, AgentRegistered]:
    """Build an info dict for every agent registered with the server.

    Each entry combines the stored registration record with live details
    about the local agent instance: availability, type name, progress
    callback support, and - when the instance exposes
    ``get_capabilities`` - its capabilities.

    Returns:
        Mapping of agent id to info dict; empty dict on unexpected error.
    """
    try:
        agents_info = {}

        for agent_id, registration in self.registered_info.items():
            # The local instance may be missing (e.g. registered remotely).
            instance = self.local_agents.get(agent_id)

            info = {
                "registration_info": registration,
                "agent_available": instance is not None,
                "agent_type": type(instance).__name__ if instance else "Unknown",
                "has_progress_callback": hasattr(instance, 'progress_callback') if instance else False,
                "supports_progress_callback": hasattr(instance,
                                                      'set_progress_callback') if instance else False,
                "is_persistent_callback_active": agent_id in self.persistent_callbacks,
                "registration_timestamp": getattr(registration, 'registration_timestamp', None),
            }

            # Capabilities are optional; a failing probe is recorded, not raised.
            if instance and hasattr(instance, 'get_capabilities'):
                try:
                    info["capabilities"] = await instance.get_capabilities()
                except Exception as exc:
                    info["capabilities_error"] = str(exc)

            agents_info[agent_id] = info

        return agents_info

    except Exception as e:
        self.app.print(f"Error getting registered agents: {e}")
        return {}
get_running_executions() async

Get information about currently running executions.

Source code in toolboxv2/mods/registry/client.py
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
async def get_running_executions(self) -> dict[str, dict[str, Any]]:
    """Get information about currently running executions.

    For every tracked execution task, reports done/cancelled flags, the
    optional start time and running time, and - for finished tasks -
    either the exception text or a completion marker.

    Returns:
        Mapping of request id to info dict; empty dict on unexpected error.
    """
    try:
        executions_info = {}

        for request_id, execution_task in self.running_executions.items():
            execution_info = {
                "request_id": request_id,
                "task_done": execution_task.done(),
                "task_cancelled": execution_task.cancelled(),
                "start_time": getattr(execution_task, 'start_time', None),
                "running_time": None,
                "task_exception": None,
                "task_result": None,
            }

            # Calculate running time (start_time is an optional attribute
            # stamped onto the task elsewhere).
            if hasattr(execution_task, 'start_time') and execution_task.start_time:
                execution_info["running_time"] = asyncio.get_event_loop().time() - execution_task.start_time

            # Get task status details
            if execution_task.done():
                try:
                    if execution_task.exception():
                        execution_info["task_exception"] = str(execution_task.exception())
                    else:
                        execution_info["task_result"] = "completed_successfully"
                except asyncio.CancelledError:
                    # Task.exception() raises CancelledError (a BaseException)
                    # for cancelled tasks; without this clause it would escape
                    # both generic Exception handlers and crash the caller.
                    execution_info["task_result"] = "cancelled"
                except Exception as e:
                    execution_info["task_status_error"] = str(e)

            executions_info[request_id] = execution_info

        return executions_info

    except Exception as e:
        self.app.print(f"Error getting running executions: {e}")
        return {}
health_check() async

Perform a health check of the connection.

Source code in toolboxv2/mods/registry/client.py
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
async def health_check(self) -> bool:
    """Perform a health check of the connection.

    Verifies the connection flags and socket state, round-trips a
    WebSocket ping (10s timeout), then sends a ``health_check`` message
    carrying current agent/execution bookkeeping.

    Returns:
        True only when every step succeeds; False otherwise (each failure
        mode is logged).
    """
    try:
        # Basic connection checks
        if not self.is_connected:
            self.app.print("🔍 Health check: Not connected")
            return False

        if not self.ws or not self.ws.open:
            self.app.print("🔍 Health check: WebSocket not open")
            return False

        # Ping test
        try:
            pong_waiter = await self.ws.ping()
            await asyncio.wait_for(pong_waiter, timeout=10.0)

            # Update last ping time
            self.last_ping_time = asyncio.get_event_loop().time()

            # Test message sending
            test_message = WsMessage(
                event='health_check',
                data={
                    "timestamp": self.last_ping_time,
                    "client_id": getattr(self, 'client_id', 'unknown'),
                    "registered_agents": list(self.local_agents.keys()),
                    "running_executions": list(self.running_executions.keys())
                }
            )

            await self.ws.send(test_message.model_dump_json())

            self.app.print("✅ Health check: Connection healthy")
            return True

        except asyncio.TimeoutError:
            self.app.print("❌ Health check: Ping timeout")
            return False
        except Exception as ping_error:
            self.app.print(f"❌ Health check: Ping failed - {ping_error}")
            return False

    except Exception as e:
        self.app.print(f"❌ Health check: Error - {e}")
        return False
on(event_name, handler)

Register an async callback function to handle a custom event from the server.

Source code in toolboxv2/mods/registry/client.py
627
628
629
630
def on(self, event_name: str, handler: Callable[[dict], Awaitable[None]]):
    """Attach *handler* as the async callback for the custom server event *event_name*."""
    self.app.print(f"Handler for custom event '{event_name}' registered.")
    self.custom_event_handlers.update({event_name: handler})
register(agent_instance, public_name, description=None) async

Register an agent with the server.

Source code in toolboxv2/mods/registry/client.py
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
async def register(self, agent_instance: Any, public_name: str, description: str | None = None) -> AgentRegistered | None:
    """Register an agent with the server."""
    if not self.is_connected or not self.ws:
        self.app.print("Not connected. Cannot register agent.")
        return None

    try:
        # Create registration request
        registration = AgentRegistration(public_name=public_name, description=description)
        message = WsMessage(event='register', data=registration.model_dump())

        # Create future for registration response
        reg_id = f"reg_{self.registration_counter}"
        self.registration_counter += 1
        self.pending_registrations[reg_id] = asyncio.Future()

        # Send registration request
        await self.ws.send(message.model_dump_json())
        self.app.print(f"Sent registration request for agent '{public_name}'")

        # Wait for registration confirmation
        try:
            reg_info = await asyncio.wait_for(self.pending_registrations[reg_id], timeout=30.0)

            # Store agent and registration info
            self.local_agents[reg_info.public_agent_id] = agent_instance
            self.registered_info[reg_info.public_agent_id] = reg_info

            self.app.print(f"Agent '{public_name}' registered successfully.")
            self.app.print(f"  Public URL: {reg_info.public_url}")
            self.app.print(f"  API Key: {reg_info.public_api_key}")

            return reg_info

        except TimeoutError:
            self.app.print("Timeout waiting for registration confirmation.")
            return None

    except Exception as e:
        self.app.print(f"Error during registration: {e}")
        return None
    finally:
        # Cleanup pending registration
        self.pending_registrations.pop(reg_id, None)
send_agent_status(agent_id, status, details=None) async

Send agent status updates.

Source code in toolboxv2/mods/registry/client.py
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
async def send_agent_status(self, agent_id: str, status: str, details: dict[str, Any] | None = None):
    """Send an agent status update to the registry server.

    Silently returns when not connected. On send failure the connection
    error handler is invoked to trigger reconnection logic.

    Args:
        agent_id: Identifier of the agent whose status changed.
        status: Status string to report.
        details: Optional extra status payload; defaults to an empty dict.
    """
    if not self.is_connected or not self.ws or not self.ws.open:
        return

    try:
        status_message = {
            "agent_id": agent_id,
            "status": status,
            "details": details or {},
            "timestamp": asyncio.get_event_loop().time(),
            "capabilities": ["chat", "progress_tracking", "outline_visualization", "meta_tool_monitoring"]
        }

        message = WsMessage(event='agent_status_update', data=status_message)
        await self.ws.send(message.model_dump_json())

    except Exception as e:
        self.app.print(f"Failed to send agent status: {e}")
        await self._handle_connection_error()
send_custom_event(event_name, data) async

Send a custom event with a JSON payload to the server.

Source code in toolboxv2/mods/registry/client.py
632
633
634
635
636
637
638
639
640
641
642
643
644
async def send_custom_event(self, event_name: str, data: dict[str, Any]):
    """Send a custom event with a JSON payload to the server.

    Logs and returns when not connected. On send failure the connection
    error handler is invoked to trigger reconnection logic.

    Args:
        event_name: Name of the custom event.
        data: JSON-serializable payload for the event.
    """
    if not self.is_connected or not self.ws or not self.ws.open:
        self.app.print("Cannot send custom event: Not connected.")
        return

    try:
        message = WsMessage(event=event_name, data=data)
        await self.ws.send(message.model_dump_json())
        self.app.print(f"Sent custom event '{event_name}' to server.")
    except Exception as e:
        self.app.print(f"Failed to send custom event: {e}")
        await self._handle_connection_error()
send_ui_progress(progress_data, retry_count=3) async

Enhanced UI progress sender with retry logic.

Source code in toolboxv2/mods/registry/client.py
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
async def send_ui_progress(self, progress_data: dict[str, Any], retry_count: int = 3):
    """Enhanced UI progress sender with retry logic.

    Packages a progress event into the registry server's UI message
    format and sends it over the WebSocket, retrying with linear backoff
    on failure.

    Args:
        progress_data: Raw progress event data from the agent.
        retry_count: Maximum number of send attempts.

    Returns:
        True on success; False when not connected or after all retries
        are exhausted.
    """
    if not self.is_connected or not self.ws or not self.ws.open:
        self.app.print("Registry client WebSocket not connected - queuing progress update")
        # Could implement a queue here for offline progress updates
        return False

    # Hoisted: the nested panel payload is read five times below.
    panels = progress_data.get('progress_data', {})

    for attempt in range(retry_count):
        try:
            # Structure progress message for registry server
            ui_message = {
                "timestamp": progress_data.get('timestamp', asyncio.get_event_loop().time()),
                "agent_id": progress_data.get('agent_id', 'unknown'),
                "event_type": progress_data.get('event_type', 'unknown'),
                "status": progress_data.get('status', 'processing'),
                "agent_name": progress_data.get('agent_name', 'Unknown'),
                "node_name": progress_data.get('node_name', 'Unknown'),
                "session_id": progress_data.get('session_id'),
                "metadata": progress_data.get('metadata', {}),

                # Enhanced progress data for UI panels
                "outline_progress": panels.get('outline', {}),
                "activity_info": panels.get('activity', {}),
                "meta_tool_info": panels.get('meta_tool', {}),
                "system_status": panels.get('system', {}),
                "graph_info": panels.get('graph', {}),

                # UI flags for selective updates
                "ui_flags": progress_data.get('ui_flags', {}),

                # Performance metrics
                "performance": progress_data.get('performance', {}),

                # Message metadata
                "message_id": f"msg_{asyncio.get_event_loop().time()}_{attempt}",
                "retry_count": attempt
            }

            # Send as WsMessage
            message = WsMessage(event='ui_progress_update', data=ui_message)
            await self.ws.send(message.model_dump_json())

            # Success - break retry loop
            self.app.print(
                f"📤 Sent UI progress: {progress_data.get('event_type')} | {progress_data.get('status')} (attempt {attempt + 1})")
            return True

        except Exception as e:
            self.app.print(f"Failed to send UI progress (attempt {attempt + 1}/{retry_count}): {e}")
            if attempt < retry_count - 1:
                await asyncio.sleep(0.5 * (attempt + 1))  # linear backoff: 0.5s, 1.0s, ...
            else:
                await self._handle_connection_error()
                return False

    return False
get_registry_client(app)

Factory function to get a singleton RegistryClient instance.

Source code in toolboxv2/mods/registry/client.py
1266
1267
1268
1269
1270
1271
def get_registry_client(app: App) -> RegistryClient:
    """Return the per-app singleton RegistryClient, creating it on first use."""
    # One client per App id, lazily constructed and cached in the module dict.
    if app.id not in registry_clients:
        registry_clients[app.id] = RegistryClient(app)
    return registry_clients[app.id]

demo_custom_messaging

setup_chain_with_live_updates() async

Example 3: Create agent chain with live progress broadcasting

Source code in toolboxv2/mods/registry/demo_custom_messaging.py
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
async def setup_chain_with_live_updates():
    """Example 3: Create agent chain with live progress broadcasting.

    Builds three specialized agents (researcher, writer, reviewer), wires
    them into a sequential chain with structured intermediate formats,
    and publishes the chain to the registry server with a live progress
    UI. Blocks in a keep-alive loop until interrupted, then closes all
    agents.
    """
    app = get_app("ChainLiveExample")
    isaa = app.get_mod("isaa")

    # Initialize ISAA
    await isaa.init_isaa()

    # Create and register specialized agents

    # Research agent
    researcher_builder = isaa.get_agent_builder("researcher_agent")
    researcher_builder.with_system_message(
        "You are a research specialist. Gather comprehensive information and provide detailed analysis. "
        "Always report your progress clearly."
    )
    #researcher_builder.with_models(complex_llm_model="openrouter/openai/gpt-4o")
    await isaa.register_agent(researcher_builder)

    # Writer agent
    writer_builder = isaa.get_agent_builder("writer_agent")
    writer_builder.with_system_message(
        "You are a professional writer. Create well-structured, engaging content from research data. "
        "Report your writing progress step by step."
    )
    #writer_builder.with_models(complex_llm_model="openrouter/openai/gpt-4o")
    await isaa.register_agent(writer_builder)

    # Reviewer agent
    reviewer_builder = isaa.get_agent_builder("reviewer_agent")
    reviewer_builder.with_system_message(
        "You are a quality reviewer. Check for accuracy, completeness, and suggest improvements. "
        "Report your review progress clearly."
    )
    # reviewer_builder.with_models(fast_llm_model="openrouter/anthropic/claude-3-haiku")
    await isaa.register_agent(reviewer_builder)

    # Get agent instances
    researcher = await isaa.get_agent("researcher_agent")
    writer = await isaa.get_agent("writer_agent")
    reviewer = await isaa.get_agent("reviewer_agent")

    # Create chain using the >> operator for sequential execution.
    # The pydantic models define the structured hand-off format between stages.
    from pydantic import BaseModel
    class Topick(BaseModel):
        topic: str

    class MiniBlog(BaseModel):
        title: str
        content: str

    class Review(BaseModel):
        feedback: str
        better_title: str
        better_content: str

    chain = researcher >> CF(Topick) >> writer >> CF(MiniBlog) >> reviewer >> CF(Review)
    chain.name = "content_creation_chain"

    # Publish chain with live updates - the progress callback is set up automatically
    result = await isaa.publish_and_host_agent(
        agent=chain,
        public_name="Content Creation Pipeline",
        description="Multi-agent chain with live progress: Research → Write → Review",
        registry_server="ws://localhost:8080/ws/registry/connect",
    )

    if result.get('public_url'):
        app.print("🔗 Chain published successfully with Live Progress UI!")
        app.print(f"   Local UI: {result['ui_url']}")
        app.print(f"   WebSocket: {result.get('registry_server')}")
        app.print(f"   WebSocket: {result.get('websocket_url')}")
        app.print(f"   Public URL: {result.get('public_url')}")
        app.print(f"   API Key: {result.get('public_api_key')}")
        print(result)

        # Example usage - test the chain with live updates (disabled):
        # app.print("\n🧪 Testing chain execution with live progress tracking:")
        # try:
        #     result_text = await chain.a_run(
        #         query="Create a comprehensive article about renewable energy trends in 2024",
        #         session_id="demo-session"
        #     )
        #     app.print("✅ Chain completed successfully!")
        #     app.print(f"   Result length: {len(result_text)} characters")
        #     app.print("   All progress was tracked live in the UI!")
        # except Exception as e:
        #     app.print(f"❌ Chain execution failed: {e}")

        # Keep services running with live status
        try:
            while True:
                await asyncio.sleep(30)
                app.print("💓 Chain services live - ready for requests")
        except KeyboardInterrupt:
            app.print("Shutting down chain services...")
    else:
        app.print("❌ Failed to publish chain to registry")

    # Clean shutdown
    await researcher.close()
    await writer.close()
    await reviewer.close()
setup_complete_agent_system(local=False) async

Vollständiges Beispiel für Agent-System mit Live-Progress.

Source code in toolboxv2/mods/registry/demo_custom_messaging.py
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
async def setup_complete_agent_system(local=False):
    """Full example: build, publish, and host a production agent with live progress.

    Creates a "production_assistant" agent via ISAA, publishes it to a registry
    server, prints the resulting public access details (URL, API key, UI,
    WebSocket), then keeps the service alive until KeyboardInterrupt.

    Args:
        local: If True, register against a local registry on ws://localhost:8080;
            otherwise use the public wss://simplecore.app endpoint.
    """

    app = get_app("CompleteAgentSystem")
    isaa = app.get_mod("isaa")

    # Initialize ISAA before any agent can be built or registered.
    await isaa.init_isaa()

    # Build the advanced agent (the system prompt is intentionally in German).
    advanced_builder = isaa.get_agent_builder("production_assistant")
    advanced_builder.with_system_message("""
        Du bist ein produktions-fertiger AI-Assistent mit detailliertem Progress-Tracking.

        Arbeitsweise:
        1. Analysiere die Anfrage sorgfältig
        2. Erstelle einen strukturierten Plan (Outline)
        3. Führe jeden Schritt methodisch aus
        4. Verwende Meta-Tools für komplexe Aufgaben
        5. Berichte kontinuierlich über deinen Fortschritt
        6. Liefere umfassende, gut strukturierte Antworten

        Zeige immer, welche Tools du verwendest und warum.
        Erkläre deine Reasoning-Loops transparent.
        """)

    # Register the agent, then fetch the runnable instance.
    await isaa.register_agent(advanced_builder)
    agent = await isaa.get_agent("production_assistant")

    # Production-ready publish & host — one call does everything.
    result = await isaa.publish_and_host_agent(
        agent=agent,
        public_name="Production AI Assistant",
        registry_server="ws://localhost:8080/ws/registry/connect" if local else "wss://simplecore.app/ws/registry/connect",
        description="Production-ready AI assistant with comprehensive progress tracking, step-by-step reasoning, and meta-tool visualization. Supports real-time progress updates, outline tracking, and multi-user access.",
        access_level="public"
    )

    if result.get('success'):
        app.print("🎉 AGENT SYSTEM FULLY DEPLOYED!")
        app.print("")
        app.print("🌐 Public Access:")
        app.print(f"   URL: {result['public_url']}")
        app.print(f"   API Key: {result['public_api_key']}")
        app.print("")
        app.print("🖥️  Live UI:")
        app.print(f"   Registry UI: {result['ui_url']}")
        if result.get('local_ui'):
            app.print(f"   Local UI: {result['local_ui'].get('ui_url')}")
        app.print("")
        app.print("🔌 WebSocket:")
        app.print(f"   Live Updates: {result['websocket_url']}")
        app.print("")
        app.print("📋 cURL Test:")
        app.print(f"""curl -X POST {result['public_url']} \\
  -H "Content-Type: application/json" \\
  -H "Authorization: Bearer {result['public_api_key']}" \\
  -d '{{"query": "Create a detailed analysis of quantum computing with step-by-step progress", "session_id": "test-session"}}'""")

        # Local smoke test of the agent (the actual run is commented out).
        app.print("\n🧪 Testing agent locally...")
        #await asyncio.sleep(5)
        #test_result = await agent.a_run(
        #    "hey",
        #    session_id="local_test"
        #)
        app.print("✅ Test completed successfully!")

        # Keep the service alive until interrupted.
        try:
            while True:
                await asyncio.sleep(30)
                app.print("💓 Agent services running - ready for requests")
        except KeyboardInterrupt:
            app.print("🛑 Shutting down agent services...")
    else:
        app.print(f"❌ Deployment failed: {result.get('error')}")
        print(result)

    await agent.close()
setup_multiple_live_agents() async

Example 4: Host multiple agents with individual live UIs

Source code in toolboxv2/mods/registry/demo_custom_messaging.py
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
async def setup_multiple_live_agents():
    """Example 4: Host multiple agents with individual live UIs.

    Builds three specialized agents (math tutor, code helper, creative writer),
    publishes each one with its own live-progress UI, runs one smoke-test query
    per agent, then prints a heartbeat forever until KeyboardInterrupt, at
    which point every hosted agent is closed.
    """
    app = get_app("MultiAgentLiveExample")
    isaa = app.get_mod("isaa")

    # Initialize ISAA
    await isaa.init_isaa()

    # Create different specialized agents
    # NOTE(review): the "port" entries are never passed to
    # publish_and_host_agent below — presumably informational only; confirm.
    agents_config = [
        {
            "name": "math_tutor",
            "system": "You are a mathematics tutor. Explain concepts step-by-step with live progress updates.",
            "public_name": "Live Math Tutor",
            "port": 8770
        },
        {
            "name": "code_helper",
            "system": "You are a coding assistant. Help debug and explain code with detailed progress tracking.",
            "public_name": "Live Code Assistant",
            "port": 8771
        },
        {
            "name": "creative_writer",
            "system": "You are a creative writer. Generate stories and content with live creative process updates.",
            "public_name": "Live Creative Writer",
            "port": 8772
        }
    ]

    hosted_agents = []

    # Create and host each agent
    for config in agents_config:
        # Create agent builder
        builder = isaa.get_agent_builder(config["name"])
        builder.with_system_message(config["system"])
        # builder.with_models(complex_llm_model="openrouter/openai/gpt-4o")

        # Register agent
        await isaa.register_agent(builder)

        # Get agent instance
        agent = await isaa.get_agent(config["name"])

        # Host with live UI — progress tracking is set up automatically.
        result = await isaa.publish_and_host_agent(
            agent=agent,
            public_name=config["public_name"],
            description=f"Specialized agent: {config['public_name']} with live progress updates",
        )

        hosted_agents.append({
            'name': config["name"],
            'agent': agent,
            'result': result
        })

        app.print(f"🚀 {config['public_name']} live at: {result['ui_url']}")

    # Test all agents with live progress
    app.print("\n🧪 Testing all agents with live progress:")

    test_queries = [
        ("math_tutor", "Explain how to solve quadratic equations step by step"),
        ("code_helper", "Debug this Python function and explain the process"),
        ("creative_writer", "Write a short story about AI and humans working together")
    ]

    for agent_name, query in test_queries:
        agent_info = next(a for a in hosted_agents if a['name'] == agent_name)
        app.print(f"Testing {agent_name} - watch live progress in UI...")

        # NOTE(review): rebinds `result` (last publish result) — harmless
        # since the value is unused afterwards.
        try:
            result = await agent_info['agent'].a_run(query, session_id=f"test_{agent_name}")
            app.print(f"✅ {agent_name} completed - live progress was shown!")
        except Exception as e:
            app.print(f"❌ {agent_name} failed: {e}")

    # Keep all agents running
    try:
        while True:
            await asyncio.sleep(60)
            app.print("💓 All agents live and ready")
            for agent_info in hosted_agents:
                app.print(f"   • {agent_info['name']}: {agent_info['result']['ui_url']}")
    except KeyboardInterrupt:
        app.print("Shutting down all live agents...")
        for agent_info in hosted_agents:
            await agent_info['agent'].close()

demo_registry

run_end_user_test() async

Simuliert einen externen Aufruf an die öffentliche API des Registry Servers.

Source code in toolboxv2/mods/registry/demo_registry.py
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
async def run_end_user_test():
    """Simulate an external call against the registry server's public API.

    Waits for the module-level `published_event` (set by run_local_client once
    the agent is published), then POSTs a query to the published public URL
    with the Bearer API key and streams the line-delimited JSON response,
    printing each event until the final result arrives.
    """
    print("--- [USER] Warte darauf, dass der Agent publiziert wird... ---")
    await published_event.wait()
    print("--- [USER] Agent ist jetzt öffentlich. Starte Testaufruf in 3 Sekunden... ---")
    await asyncio.sleep(3)

    # Credentials were stored in `published_info` by the publishing client.
    public_url = published_info.get("public_url")
    api_key = published_info.get("public_api_key")

    if not public_url or not api_key:
        print("--- [USER] FEHLER: Keine öffentlichen Agenten-Infos gefunden!", file=sys.stderr)
        return

    print(f"--- [USER] Sende POST-Anfrage an: {public_url} ---")

    request_payload = {
        "query": "Hallo, weitergeleitete Welt!",
        "session_id": "ext-user-session-001"
    }

    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }

    async with aiohttp.ClientSession() as session:
        try:
            async with session.post(public_url, json=request_payload, headers=headers) as response:
                print(f"--- [USER] Antwort-Status: {response.status} ---")

                if response.status == 200:
                    print("--- [USER] Beginne mit dem Streamen der Antwort-Events: ---")
                    # The response is application/json-seq, so read it line by line.
                    async for line in response.content:
                        if line:
                            try:
                                data = json.loads(line)
                                event_type = data.get('event_type', 'unknown')
                                status = data.get('status', '...')
                                print(f"  [STREAM] Event: {event_type:<20} | Status: {status} {data}")

                                # The final event carries the actual result.
                                if event_type == "final_result":
                                    final_result = data.get('details', {}).get('result')
                                    print("\n--- [USER] Endgültiges Ergebnis erhalten: ---")
                                    print(f"  >>> {final_result}")

                            except json.JSONDecodeError:
                                print(f"  [STREAM] Konnte Zeile nicht als JSON parsen: {line.decode()}")
                else:
                    error_text = await response.text()
                    print(f"--- [USER] FEHLER vom Server: {error_text}", file=sys.stderr)
        except aiohttp.ClientConnectorError as e:
            print(f"--- [USER] VERBINDUNGSFEHLER: Konnte den Server nicht erreichen. Läuft er? Fehler: {e}",
                  file=sys.stderr)
run_local_client() async

Startet die zweite toolboxv2-Instanz als lokalen Client, der einen Agenten hostet.

Source code in toolboxv2/mods/registry/demo_registry.py
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
async def run_local_client():
    """Start the second toolboxv2 instance as a local client hosting an agent.

    Creates a simple "EchoAgent", publishes it to the registry server running
    on 127.0.0.1:8080, stores the returned public credentials in the
    module-level `published_info`, and signals `published_event` so the
    end-user test task can start. Stays alive forever to serve forwarded
    requests.
    """
    print("--- [CLIENT] Initialisiere lokale Client Instanz ---")
    client_app = get_app("LocalClientInstance")

    # Fetch and initialize the ISAA module for this instance.
    isaa: ISAA_Tools = client_app.get_mod("isaa")
    await isaa.init_isaa()
    print("--- [CLIENT] ISAA initialisiert. ---")

    # --- Create the agent ---
    print("--- [CLIENT] Erstelle einen einfachen 'EchoAgent'... ---")
    builder = isaa.get_agent_builder("EchoAgent")
    builder.with_system_message("You are an echo agent. Repeat the user's query exactly, but prefix it with 'Echo: '.")
    await isaa.register_agent(builder)

    # Fetch the agent instance (not strictly required for publishing by
    # name, but useful for the demo output below).
    echo_agent = await isaa.get_agent("EchoAgent")
    print(f"--- [CLIENT] 'EchoAgent' ({type(echo_agent).__name__}) erstellt. ---")

    # --- Publish the agent ---
    # Wait until the server is definitely up.
    await asyncio.sleep(2)

    server_ws_url = "ws://127.0.0.1:8080/ws/registry/connect"
    print(f"--- [CLIENT] Publiziert 'EchoAgent' am Server: {server_ws_url} ---")

    # Call the hosting/publishing API.
    reg_info = await isaa.host_agent_ui(
        agent=echo_agent,
        public_name="Public Echo Service",
        server_url=server_ws_url,
        description="A simple agent that echoes your input."
    )

    if reg_info:
        print("--- [CLIENT] Agent erfolgreich publiziert! Details erhalten: ---")
        print(f"  > Public URL: {reg_info.public_url}")
        print(f"  > API Key: {reg_info.public_api_key}")

        # Store the info and signal the end-user task that it may start.
        published_info.update(reg_info.model_dump())
        published_event.set()
    else:
        print("--- [CLIENT] FEHLER: Agenten-Publizierung fehlgeschlagen. ---", file=sys.stderr)

    # Keep this task alive to listen for forwarded requests.
    await asyncio.Future()
run_registry_server() async

Startet die erste toolboxv2-Instanz als unseren öffentlichen Server.

Source code in toolboxv2/mods/registry/demo_registry.py
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
async def run_registry_server():
    """Start the first toolboxv2 instance as our public registry server.

    Brings up the HTTP/WebSocket server on 127.0.0.1:8080 and then blocks
    forever so the server keeps serving.
    """
    print("--- [SERVER] Initialisiere Registry Server Instanz ---")

    # Get an App instance. Loading the 'registry' module happens
    # automatically through toolboxv2's __init__.py structure.
    server_app = get_app("RegistryServerInstance")

    # Start the web server on port 8080.
    # NOTE(review): the original comment claimed `blocking=False` is passed,
    # but no argument is given here — presumably non-blocking is the default;
    # confirm against App.start_server.
    server_app.start_server()

    print("--- [SERVER] Registry Server läuft auf http://127.0.0.1:8080 ---")
    print("--- [SERVER] Wartet auf eingehende Client-Verbindungen... ---")

    # Keep this task alive so the server keeps running.
    await asyncio.Future()

server

broadcast_to_ui_clients(app, data) async

Broadcast updates to all connected UI clients.

Source code in toolboxv2/mods/registry/server.py
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
async def broadcast_to_ui_clients(app: App, data: dict[str, Any]):
    """Send one update to every connected UI client.

    Iterates a snapshot of STATE.ui_clients, sends `data` to each, drops any
    connection whose send fails, and logs a summary. Never raises.
    """
    if not STATE.ui_clients:
        app.print("No active UI clients to broadcast to")
        return

    app.print(f"Broadcasting to {len(STATE.ui_clients)} UI clients: {data.get('event', 'unknown')}")

    stale = set()
    sent_count = 0

    # Iterate a copy so concurrent connects/disconnects cannot break the loop.
    for client_id in STATE.ui_clients.copy():
        try:
            await app.ws_send(client_id, data)
        except Exception as err:
            app.print(f"Failed to broadcast to UI client {client_id}: {err}")
            stale.add(client_id)
        else:
            sent_count += 1

    # Forget every connection that failed to receive the message.
    STATE.ui_clients.difference_update(stale)

    app.print(f"Broadcast completed: {sent_count} successful, {len(stale)} failed")
handle_agent_status_update(app, message) async

Handle agent status updates.

Source code in toolboxv2/mods/registry/server.py
191
192
193
194
195
196
197
198
199
200
201
async def handle_agent_status_update(app: App, message: WsMessage):
    """Relay an agent status update to all UI clients.

    Wraps the raw message payload in an 'agent_status_update' envelope and
    fans it out via broadcast_to_ui_clients; failures are logged, not raised.
    """
    try:
        envelope = {'event': 'agent_status_update', 'data': message.data}
        await broadcast_to_ui_clients(app, envelope)
    except Exception as exc:
        app.print(f"Agent status update error: {exc}", error=True)
handle_execution_error(app, message) async

Handle execution errors.

Source code in toolboxv2/mods/registry/server.py
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
async def handle_execution_error(app: App, message: WsMessage):
    """Process an execution error reported by an agent client.

    Delivers the error to the waiting request queue (if any) and broadcasts
    an 'execution_error' event to UI clients. Failures are logged, not raised.
    """
    try:
        err = ExecutionError.model_validate(message.data)

        # Unblock the HTTP handler waiting on this request, if one exists.
        queue = STATE.pending_requests.get(err.request_id)
        if queue is not None:
            await queue.put(err)

        ui_event = {
            'event': 'execution_error',
            'data': {
                'request_id': err.request_id,
                'error': err.error,
                'timestamp': asyncio.get_event_loop().time()
            }
        }
        await broadcast_to_ui_clients(app, ui_event)
    except Exception as exc:
        app.print(f"Execution error handling error: {exc}", error=True)
handle_execution_result(app, message) async

Handle execution results.

Source code in toolboxv2/mods/registry/server.py
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
async def handle_execution_result(app: App, message: WsMessage):
    """Process an execution result streamed back by an agent client.

    Delivers the result to the waiting request queue (if any) and broadcasts
    an 'execution_progress' event to UI clients. Failures are logged, not raised.
    """
    try:
        exec_result = ExecutionResult.model_validate(message.data)

        # Unblock the HTTP handler waiting on this request, if one exists.
        queue = STATE.pending_requests.get(exec_result.request_id)
        if queue is not None:
            await queue.put(exec_result)

        # Mirror the progress to every connected UI client.
        ui_event = {
            'event': 'execution_progress',
            'data': {
                'request_id': exec_result.request_id,
                'payload': exec_result.payload,
                'is_final': exec_result.is_final,
                'timestamp': asyncio.get_event_loop().time()
            }
        }
        await broadcast_to_ui_clients(app, ui_event)
    except Exception as exc:
        app.print(f"Execution result error: {exc}", error=True)
handle_registration(app, conn_id, session, message) async

Handle agent registration.

Source code in toolboxv2/mods/registry/server.py
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
async def handle_registration(app: App, conn_id: str, session: dict, message: WsMessage):
    """Handle agent registration.

    Validates the registration payload, mints a public agent ID and API key,
    records the agent in the global STATE maps, sends the credentials back to
    the registering connection, and notifies UI clients that the agent is
    online. Any failure is logged, never raised.
    """
    try:
        reg_data = AgentRegistration.model_validate(message.data)
        # Fresh, unguessable identifiers for the public endpoint.
        agent_id = f"agent_{secrets.token_urlsafe(16)}"
        api_key = f"tbk_{secrets.token_urlsafe(32)}"

        # Record ownership and lookup mappings for routing and auth.
        STATE.client_agents.setdefault(conn_id, []).append(agent_id)
        STATE.agent_to_client[agent_id] = conn_id
        STATE.key_to_agent[api_key] = agent_id
        STATE.agent_details[agent_id] = reg_data.model_dump()

        # NOTE(review): the `or` fallback to session['host'] only triggers if
        # APP_BASE_URL is set to an empty string (the getenv default is
        # truthy), and the "localhost" comparison below only matches if
        # APP_BASE_URL is literally "localhost" — confirm this is intended.
        base_url = os.getenv("APP_BASE_URL", "http://localhost:8080") or session.get('host', 'localhost:8080')
        if base_url == "localhost":
            base_url = "localhost:8080"
            app.print("APP_BASE_URL is localhost. Using default port 8080.")
        public_url = f"{base_url}/api/registry/run?public_agent_id={agent_id}"

        if not public_url.startswith('http'):
            public_url = f"http://{public_url}"

        response = AgentRegistered(
            public_name=reg_data.public_name,
            public_agent_id=agent_id,
            public_api_key=api_key,
            public_url=public_url,
        )

        # Send registration confirmation
        response_message = WsMessage(event='agent_registered', data=response.model_dump())
        await app.ws_send(conn_id, response_message.model_dump())

        # Notify UI clients
        await broadcast_to_ui_clients(app, {
            "event": "agent_registered",
            "data": {
                "public_agent_id": agent_id,
                "public_name": reg_data.public_name,
                "description": reg_data.description,
                "status": "online"
            }
        })

        app.print(f"Agent '{reg_data.public_name}' registered with ID: {agent_id}")

    except Exception as e:
        app.print(f"Registration error: {e}", error=True)
handle_ui_progress_update(app, message) async

Handle UI progress updates.

Source code in toolboxv2/mods/registry/server.py
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
async def handle_ui_progress_update(app: App, message: WsMessage):
    """Record a progress event for an agent and broadcast it to UI clients.

    Appends the event to the agent's rolling history in STATE.recent_progress
    (capped at the 50 most recent events) and forwards it to every connected
    UI client as a 'live_progress_update'. Failures are logged, never raised.
    """
    try:
        progress_data = message.data
        agent_id = progress_data.get('agent_id', 'unknown')

        # setdefault replaces the manual membership-check-then-assign idiom
        # and lets us trim in place instead of rebuilding the list each event.
        history = STATE.recent_progress.setdefault(agent_id, [])
        history.append(progress_data)

        # Keep only the last 50 events per agent.
        if len(history) > 50:
            del history[:-50]

        # Broadcast to UI clients
        await broadcast_to_ui_clients(app, {
            "event": "live_progress_update",
            "data": progress_data
        })

    except Exception as e:
        app.print(f"UI progress update error: {e}", error=True)
on_disconnect(app, conn_id, session=None) async

Enhanced disconnect handler with comprehensive cleanup and UI notifications.

Source code in toolboxv2/mods/registry/server.py
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
async def on_disconnect(app: App, conn_id: str, session: dict = None):
    """Enhanced disconnect handler with comprehensive cleanup and UI notifications.

    Two client kinds are handled:
      * UI clients: simply removed from STATE.ui_clients.
      * Agent clients: every agent registered by the connection is removed
        from all state maps (routing, API key, progress history), pending
        requests are failed with an ExecutionError, and UI clients are
        notified that each agent went offline.
    """
    app.print(f"Registry client disconnected: {conn_id}")

    # Check if this is a UI client
    if conn_id in STATE.ui_clients:
        STATE.ui_clients.discard(conn_id)
        app.print(f"UI client {conn_id} removed from active clients")
        return

    # Handle agent client disconnection
    if conn_id in STATE.client_agents:
        # Copy so cleanup cannot race with concurrent mutation of the list.
        agent_ids_to_cleanup = STATE.client_agents[conn_id].copy()

        for agent_id in agent_ids_to_cleanup:
            try:
                # Get agent details before removal for notification
                agent_details = STATE.agent_details.get(agent_id, {})
                agent_name = agent_details.get('public_name', 'Unknown')

                # Remove from all state dictionaries
                STATE.agent_to_client.pop(agent_id, None)
                STATE.agent_details.pop(agent_id, None)

                # Remove API key mapping (reverse lookup: key -> agent)
                key_to_remove = next((k for k, v in STATE.key_to_agent.items() if v == agent_id), None)
                if key_to_remove:
                    STATE.key_to_agent.pop(key_to_remove, None)

                # Clean up progress data
                STATE.recent_progress.pop(agent_id, None)

                # Clean up any pending requests for this agent by checking if queue exists and clearing it
                # NOTE(review): this loop fails *every* entry in
                # STATE.pending_requests, not only requests addressed to this
                # agent, and runs once per agent of the disconnecting client —
                # confirm this blanket cancellation is intended.
                requests_to_cleanup = []
                for req_id in list(STATE.pending_requests.keys()):
                    try:
                        # Put error in queue to unblock any waiting requests
                        error_result = ExecutionError(
                            request_id=req_id,
                            error="Agent disconnected unexpectedly",
                            public_agent_id=agent_id
                        )
                        await STATE.pending_requests[req_id].put(error_result)
                        requests_to_cleanup.append(req_id)
                    except Exception as e:
                        app.print(f"Error cleaning up pending request {req_id}: {e}")

                # Remove cleaned up requests
                for req_id in requests_to_cleanup:
                    STATE.pending_requests.pop(req_id, None)

                # Notify UI clients about agent going offline (non-blocking)
                if agent_details:
                    asyncio.create_task(broadcast_to_ui_clients(app, {
                        "event": "agent_offline",
                        "data": {
                            "public_agent_id": agent_id,
                            "public_name": agent_name,
                            "status": "offline",
                            "timestamp": asyncio.get_event_loop().time()
                        }
                    }))

                app.print(f"Agent '{agent_name}' (ID: {agent_id}) unregistered and cleaned up")

            except Exception as e:
                app.print(f"Error during agent cleanup for {agent_id}: {e}", error=True)

        # Remove the client connection entry
        STATE.client_agents.pop(conn_id, None)

        app.print(f"Client {conn_id} fully disconnected and cleaned up ({len(agent_ids_to_cleanup)} agents removed)")
    else:
        app.print(f"Unknown client {conn_id} disconnected (no agents to clean up)")
on_message(app, conn_id, session, payload) async

Enhanced message handler with proper error handling.

Source code in toolboxv2/mods/registry/server.py
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
async def on_message(app: App, conn_id: str, session: dict, payload: dict):
    """Dispatch an incoming registry WebSocket message to its event handler.

    Accepts either a dict or a JSON string payload. 'register' is handled
    separately because it needs the connection and session; all other known
    events go through a dispatch table. Errors are logged, never raised.
    """
    try:
        # Normalize string payloads into dicts.
        if isinstance(payload, str):
            payload = json.loads(payload)

        message = WsMessage.model_validate(payload)
        app.print(f"Registry received event: {message.event} from {conn_id}")

        # Registration needs conn_id/session, so it bypasses the table.
        if message.event == 'register':
            await handle_registration(app, conn_id, session, message)
            return

        dispatch = {
            'ui_progress_update': handle_ui_progress_update,
            'execution_result': handle_execution_result,
            'execution_error': handle_execution_error,
            'agent_status_update': handle_agent_status_update,
        }
        handler = dispatch.get(message.event)
        if handler is not None:
            await handler(app, message)
        else:
            app.print(f"Unhandled event '{message.event}' from client {conn_id}")

    except Exception as e:
        app.print(f"Error processing WebSocket message: {e}", error=True)
register_ui_ws_handlers(app)

Register UI-specific WebSocket handlers.

Source code in toolboxv2/mods/registry/server.py
416
417
418
419
420
421
422
423
@export(mod_name=Name, websocket_handler="ui_connect")
def register_ui_ws_handlers(app: App):
    """Register UI-specific WebSocket handlers.

    Returns the handler map (connect/message/disconnect callbacks) that the
    framework wires to the "ui_connect" WebSocket endpoint.
    """
    return {
        "on_connect": ui_on_connect,
        "on_message": ui_on_message,
        "on_disconnect": ui_on_disconnect,
    }
register_ws_handlers(app)

Register WebSocket handlers for the registry.

Source code in toolboxv2/mods/registry/server.py
406
407
408
409
410
411
412
413
@export(mod_name=Name, websocket_handler="connect")
def register_ws_handlers(app: App):
    """Register WebSocket handlers for the registry.

    Returns the handler map (connect/message/disconnect callbacks) that the
    framework wires to the agent-facing "connect" WebSocket endpoint.
    """
    return {
        "on_connect": on_connect,
        "on_message": on_message,
        "on_disconnect": on_disconnect,
    }
run(app, public_agent_id, request) async

Public API endpoint to run agents.

Source code in toolboxv2/mods/registry/server.py
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
@export(mod_name=Name, api=True, version="1", request_as_kwarg=True, api_methods=['POST'])
async def run(app: App, public_agent_id: str, request: RequestData):
    """Public API endpoint to run agents.

    Authenticates the caller via a Bearer API key, forwards a run request to
    the WebSocket client hosting the agent, then waits (up to 120s per event)
    for streamed results until a final one arrives.

    Args:
        app: Toolbox application instance (provides ws_send and logging).
        public_agent_id: Public ID of the agent to execute.
        request: Incoming HTTP request; body should contain 'query' and may
            contain 'session_id' and 'kwargs'.

    Returns:
        Result.json with {"result": ...} on success, or an error Result with
        exec_code 401/403/503/500/504 on failure.
    """
    if request is None:
        return Result.default_user_error(info="Failed to run agent: No request provided.")
    if not request.headers:
        return Result.default_user_error(info="Failed to run agent: No request headers provided.")

    # The header may be exposed as an attribute or only in the raw dict.
    auth_header = request.headers.authorization or request.headers.to_dict().get('authorization')

    if not auth_header or not auth_header.startswith('Bearer '):
        return Result.default_user_error("Authorization header missing or invalid.", exec_code=401)

    api_key = auth_header.split(' ')[1]

    # The key must map to exactly the agent the caller asked for.
    if STATE.key_to_agent.get(api_key) != public_agent_id:
        return Result.default_user_error("Invalid API Key or Agent ID.", exec_code=403)

    conn_id = STATE.agent_to_client.get(public_agent_id)
    if not conn_id:
        return Result.default_internal_error("Agent is not currently connected/online.", exec_code=503)

    body = request.body
    request_id = f"req_{secrets.token_urlsafe(16)}"

    run_request = RunRequest(
        request_id=request_id,
        public_agent_id=public_agent_id,
        query=body.get('query', ''),
        session_id=body.get('session_id'),
        kwargs=body.get('kwargs', {})
    )

    # Results/errors for this request are delivered through this queue by the
    # WebSocket handlers (handle_execution_result / handle_execution_error).
    response_queue = asyncio.Queue()
    STATE.pending_requests[request_id] = response_queue

    # Send run request to the client
    await app.ws_send(conn_id, WsMessage(event='run_request', data=run_request.model_dump()).model_dump())

    try:
        final_result = None
        while True:
            item = await asyncio.wait_for(response_queue.get(), timeout=120.0)

            if isinstance(item, ExecutionError):
                return Result.default_internal_error(
                    info=f"An error occurred during agent execution: {item.error}",
                    exec_code=500
                )

            if item.is_final:
                final_result = item.payload.get("details", {}).get("result")
                break

        return Result.json(data={"result": final_result})

    # FIX: asyncio.wait_for raises asyncio.TimeoutError, which is NOT the
    # builtin TimeoutError before Python 3.11 — catching the builtin would
    # miss the timeout there. From 3.11 on the names are aliases, so this
    # change is behavior-preserving on newer interpreters.
    except asyncio.TimeoutError:
        return Result.default_internal_error(
            info="The request timed out as the agent did not respond in time.",
            exec_code=504
        )
    finally:
        STATE.pending_requests.pop(request_id, None)
ui(app, public_agent_id=None) async

Serve the interactive 3-panel agent UI.

Source code in toolboxv2/mods/registry/server.py
491
492
493
494
495
496
@export(mod_name=Name, api=True, version="1", api_methods=['GET'])
async def ui(app: App, public_agent_id: str = None):
    """Serve the interactive 3-panel agent UI.

    Returns the full HTML page from the ISAA UI module as a raw HTML Result.
    `public_agent_id` is accepted but currently unused by this handler —
    presumably consumed client-side; confirm against the UI script.
    """
    from ..isaa.ui import get_agent_ui_html
    html_content = get_agent_ui_html()
    return Result.html(data=html_content, row=True)
ui_on_connect(app, conn_id, session) async

UI Client connection.

Source code in toolboxv2/mods/registry/server.py
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
async def ui_on_connect(app: App, conn_id: str, session: dict):
    """Accept a UI client connection and send it the current agents list.

    Adds the connection to STATE.ui_clients, then pushes an 'agents_list'
    event containing every registered agent whose owning client is still
    connected.
    """
    app.print(f"UI Client connecting: {conn_id}")
    STATE.ui_clients.add(conn_id)
    app.print(f"UI Client connected: {conn_id} (Total: {len(STATE.ui_clients)})")

    # Snapshot of all registered agents that are currently online.
    available_agents = [
        {
            "public_agent_id": agent_id,
            "public_name": details.get('public_name', 'Unknown'),
            "description": details.get('description', ''),
            "status": "online",
        }
        for agent_id, details in STATE.agent_details.items()
        if agent_id in STATE.agent_to_client
    ]

    await app.ws_send(conn_id, {
        "event": "agents_list",
        "data": {"agents": available_agents}
    })
ui_on_disconnect(app, conn_id, session=None) async

UI Client Disconnection.

Source code in toolboxv2/mods/registry/server.py
400
401
402
403
async def ui_on_disconnect(app: App, conn_id: str, session: dict = None):
    """UI Client Disconnection.

    Logs the disconnect and removes the connection from STATE.ui_clients;
    discard() makes the removal safe even if the ID was never tracked.
    """
    app.print(f"UI Client disconnected: {conn_id}")
    STATE.ui_clients.discard(conn_id)
ui_on_message(app, conn_id, session, payload) async

UI Client Message Handler.

Source code in toolboxv2/mods/registry/server.py
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
async def ui_on_message(app: App, conn_id: str, session: dict, payload: dict):
    """UI Client Message Handler.

    Dispatches websocket events from the UI:
      - 'subscribe_agent': replays the last few progress events for the agent
        and confirms the subscription.
      - 'chat_message': validates the API key, forwards a RunRequest to the
        agent's client connection and acknowledges receipt to the UI.

    Any exception during handling is reported back to the UI as an
    'error' event instead of being raised.
    """
    try:
        # Payloads may arrive as raw JSON strings; normalize to a dict.
        if isinstance(payload, str):
            payload = json.loads(payload)

        event = payload.get('event')
        data = payload.get('data', {})

        if event == 'subscribe_agent':
            agent_id = data.get('public_agent_id')
            if agent_id in STATE.agent_details:
                # Replay recent history so the UI can catch up mid-run.
                if agent_id in STATE.recent_progress:
                    for progress_event in STATE.recent_progress[agent_id][-10:]:
                        await app.ws_send(conn_id, {
                            "event": "historical_progress",
                            "data": progress_event
                        })

                await app.ws_send(conn_id, {
                    "event": "subscription_confirmed",
                    "data": {"public_agent_id": agent_id}
                })

        elif event == 'chat_message':
            agent_id = data.get('public_agent_id')
            message_text = data.get('message')
            session_id = data.get('session_id', f'ui_{conn_id}')
            api_key = data.get('api_key')

            # The supplied key must map to exactly the agent being addressed.
            if not api_key or STATE.key_to_agent.get(api_key) != agent_id:
                await app.ws_send(conn_id, {
                    "event": "error",
                    "data": {"error": "Invalid or missing API Key"}
                })
                return

            if agent_id in STATE.agent_to_client:
                agent_conn_id = STATE.agent_to_client[agent_id]
                request_id = f"ui_req_{secrets.token_urlsafe(16)}"

                run_request = RunRequest(
                    request_id=request_id,
                    public_agent_id=agent_id,
                    query=message_text,
                    session_id=session_id,
                    kwargs={}
                )

                # Register a response queue so streamed results can be
                # routed back to this request.
                response_queue = asyncio.Queue()
                STATE.pending_requests[request_id] = response_queue

                await app.ws_send(agent_conn_id, WsMessage(
                    event='run_request',
                    data=run_request.model_dump()
                ).model_dump())

                await app.ws_send(conn_id, {
                    "event": "message_acknowledged",
                    "data": {"request_id": request_id, "agent_id": agent_id}
                })
            else:
                # Fix: previously the message was silently dropped when the
                # agent had no live connection; tell the UI explicitly.
                await app.ws_send(conn_id, {
                    "event": "error",
                    "data": {"error": "Agent is not connected"}
                })

    except Exception as e:
        app.print(f"UI message handling error: {e}", error=True)
        await app.ws_send(conn_id, {
            "event": "error",
            "data": {"error": str(e)}
        })

types

AgentRegistered

Bases: BaseModel

Server -> Client: Response after successful registration.

Source code in toolboxv2/mods/registry/types.py
14
15
16
17
18
19
class AgentRegistered(BaseModel):
    """Server -> Client: Response after successful registration."""
    # Human-readable name echoed back from the registration request.
    public_name: str
    public_agent_id: str = Field(..., description="The unique public ID for the agent.")
    public_api_key: str = Field(..., description="The secret API key for public access.")
    public_url: str = Field(..., description="The full public URL to run the agent.")
AgentRegistration

Bases: BaseModel

Client -> Server: Payload to register a new agent.

Source code in toolboxv2/mods/registry/types.py
 9
10
11
12
class AgentRegistration(BaseModel):
    """Client -> Server: Payload to register a new agent."""
    public_name: str = Field(..., description="A user-friendly name for the agent.")
    # Optional free-text description; defaults to None when omitted.
    description: str | None = Field(None, description="Optional description of the agent's capabilities.")
ExecutionError

Bases: BaseModel

Client -> Server: Reports an error during execution.

Source code in toolboxv2/mods/registry/types.py
35
36
37
38
class ExecutionError(BaseModel):
    """Client -> Server: Reports an error during execution."""
    # Correlates the error with the originating RunRequest.
    request_id: str
    # Human-readable error description.
    error: str
ExecutionResult

Bases: BaseModel

Client -> Server: A chunk of the execution result (for streaming).

Source code in toolboxv2/mods/registry/types.py
29
30
31
32
33
class ExecutionResult(BaseModel):
    """Client -> Server: A chunk of the execution result (for streaming)."""
    # Correlates the chunk with the originating RunRequest.
    request_id: str
    payload: dict[str, Any] = Field(..., description="The ProgressEvent or final result as a dictionary.")
    is_final: bool = Field(False, description="True if this is the last message for this request.")
RunRequest

Bases: BaseModel

Server -> Client: Request to execute an agent.

Source code in toolboxv2/mods/registry/types.py
21
22
23
24
25
26
27
class RunRequest(BaseModel):
    """Server -> Client: Request to execute an agent."""
    request_id: str = Field(..., description="A unique ID for this specific execution request.")
    public_agent_id: str = Field(..., description="The ID of the agent to run.")
    query: str = Field(..., description="The main input/query for the agent.")
    session_id: str | None = Field(None, description="Session ID for maintaining context.")
    # Pydantic deep-copies non-hashable Field defaults per instance,
    # so the shared-mutable-default pitfall does not apply here.
    kwargs: dict[str, Any] = Field({}, description="Additional keyword arguments for the a_run method.")
WsMessage

Bases: BaseModel

A generic wrapper for all WebSocket messages.

Source code in toolboxv2/mods/registry/types.py
40
41
42
43
class WsMessage(BaseModel):
    """A generic wrapper for all WebSocket messages."""
    # Event name used for dispatch (e.g. 'run_request', 'error').
    event: str
    # Event-specific payload.
    data: dict[str, Any]

talk

TalkSession

Bases: BaseModel

Represents the state of a single voice conversation session.

Source code in toolboxv2/mods/talk.py
24
25
26
27
28
29
30
31
32
33
34
class TalkSession(BaseModel):
    """Represents the state of a single voice conversation session."""
    # Randomly generated identifier handed back to the browser client.
    session_id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    # Owner of the session, resolved from the authenticated request.
    user_id: str
    # ISAA chat session holding the conversation history/memory.
    chat_session: ChatSession
    # Per-session queue feeding the SSE stream; excluded from serialization.
    event_queue: asyncio.Queue = Field(default_factory=asyncio.Queue, exclude=True)
    # Task to track the running agent process, preventing concurrent requests
    agent_task: asyncio.Task | None = Field(default=None, exclude=True)

    class Config:
        # Required because asyncio.Queue/Task are not pydantic-native types.
        arbitrary_types_allowed = True

Tools

Bases: MainTool

The main class for the Talk module, handling initialization, session management, and dependency loading.

Source code in toolboxv2/mods/talk.py
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
class Tools(MainTool):
    """
    The main class for the Talk module, handling initialization,
    session management, and dependency loading.
    """

    def __init__(self, app: App):
        # Initialize the MainTool with module-specific information
        self.version = VERSION
        self.name = MOD_NAME
        self.color = "CYAN"
        # Active voice sessions keyed by session_id.
        self.sessions: dict[str, TalkSession] = {}
        # Speech-to-text / text-to-speech callables, resolved in on_start.
        self.stt_func = None
        self.tts_func = None
        self.isaa_mod = None
        super().__init__(load=self.on_start, v=VERSION, name=MOD_NAME, tool={}, on_exit=self.on_exit)

    def on_start(self):
        """Initializes the Talk module, its dependencies (ISAA, AUDIO), and UI registration."""
        self.app.logger.info(f"Starting {self.name} v{self.version}...")

        # Get the ISAA module instance, which is a critical dependency
        # NOTE(review): the actual get_mod("isaa") call is commented out, so
        # this always stays None, logs the error below and returns early —
        # the rest of on_start is currently unreachable. Confirm intent.
        self.isaa_mod = None#self.app.get_mod("isaa")
        if not self.isaa_mod:
            self.app.logger.error(
                f"{self.name}: ISAA module not found or failed to load. Voice assistant will not be functional.")
            return

        # Initialize STT and TTS services from the AUDIO module
        if hasattr(TBEF, "AUDIO") and self.app.get_mod("AUDIO"):
            self.stt_func = self.app.run_any(TBEF.AUDIO.STT_GENERATE, model="openai/whisper-small", row=True, device=0)
            self.tts_func = self.app.get_function(TBEF.AUDIO.SPEECH, state=False)[0]

            # "404" is the framework's not-found sentinel; treat it as missing.
            if self.stt_func and self.stt_func != "404":
                self.app.logger.info("Talk STT (whisper-small) is Online.")
            else:
                self.app.logger.warning("Talk STT function not available.")
                self.stt_func = None

            if self.tts_func and self.tts_func != "404":
                self.app.logger.info("Talk TTS function is Online.")
            else:
                self.app.logger.warning("Talk TTS function not available.")
                self.tts_func = None
        else:
            self.app.logger.warning("Talk module: AUDIO module features are not available or the module is not loaded.")

        if not all([self.stt_func, self.tts_func]):
            self.app.logger.error("Talk module cannot function without both STT and TTS services.")

        # Register the UI component with CloudM
        self.app.run_any(("CloudM", "add_ui"),
                         name=MOD_NAME, title="Voice Assistant", path=f"/api/{MOD_NAME}/ui",
                         description="Natural conversation with an AI assistant.", auth=True)
        self.app.logger.info(f"{self.name} UI registered with CloudM.")

    def on_exit(self):
        """Clean up resources, especially cancelling any active agent tasks."""
        for session in self.sessions.values():
            if session.agent_task and not session.agent_task.done():
                session.agent_task.cancel()
        self.app.logger.info(f"Closing {self.name} and cleaning up sessions.")
on_exit()

Clean up resources, especially cancelling any active agent tasks.

Source code in toolboxv2/mods/talk.py
94
95
96
97
98
99
def on_exit(self):
    """Clean up resources, especially cancelling any active agent tasks."""
    # Cancel every still-running agent task before shutdown.
    for task in (s.agent_task for s in self.sessions.values()):
        if task and not task.done():
            task.cancel()
    self.app.logger.info(f"Closing {self.name} and cleaning up sessions.")
on_start()

Initializes the Talk module, its dependencies (ISAA, AUDIO), and UI registration.

Source code in toolboxv2/mods/talk.py
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
def on_start(self):
    """Initializes the Talk module, its dependencies (ISAA, AUDIO), and UI registration."""
    self.app.logger.info(f"Starting {self.name} v{self.version}...")

    # Get the ISAA module instance, which is a critical dependency
    # NOTE(review): the actual get_mod("isaa") call is commented out, so this
    # always stays None, logs the error below and returns early — the rest of
    # this method is currently unreachable. Confirm intent.
    self.isaa_mod = None#self.app.get_mod("isaa")
    if not self.isaa_mod:
        self.app.logger.error(
            f"{self.name}: ISAA module not found or failed to load. Voice assistant will not be functional.")
        return

    # Initialize STT and TTS services from the AUDIO module
    if hasattr(TBEF, "AUDIO") and self.app.get_mod("AUDIO"):
        self.stt_func = self.app.run_any(TBEF.AUDIO.STT_GENERATE, model="openai/whisper-small", row=True, device=0)
        self.tts_func = self.app.get_function(TBEF.AUDIO.SPEECH, state=False)[0]

        # "404" is the framework's not-found sentinel; treat it as missing.
        if self.stt_func and self.stt_func != "404":
            self.app.logger.info("Talk STT (whisper-small) is Online.")
        else:
            self.app.logger.warning("Talk STT function not available.")
            self.stt_func = None

        if self.tts_func and self.tts_func != "404":
            self.app.logger.info("Talk TTS function is Online.")
        else:
            self.app.logger.warning("Talk TTS function not available.")
            self.tts_func = None
    else:
        self.app.logger.warning("Talk module: AUDIO module features are not available or the module is not loaded.")

    if not all([self.stt_func, self.tts_func]):
        self.app.logger.error("Talk module cannot function without both STT and TTS services.")

    # Register the UI component with CloudM
    self.app.run_any(("CloudM", "add_ui"),
                     name=MOD_NAME, title="Voice Assistant", path=f"/api/{MOD_NAME}/ui",
                     description="Natural conversation with an AI assistant.", auth=True)
    self.app.logger.info(f"{self.name} UI registered with CloudM.")

api_open_stream(self, request, session_id) async

Opens a Server-Sent Events (SSE) stream for a given session ID.

Source code in toolboxv2/mods/talk.py
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
@export(mod_name=MOD_NAME, api=True, name="stream", api_methods=['GET'], request_as_kwarg=True)
async def api_open_stream(self: Tools, request: RequestData, session_id: str) -> Result:
    """Opens a Server-Sent Events (SSE) stream for a given session ID.

    Returns 404 for unknown/expired sessions. The returned SSE stream
    relays every event put on the session's event_queue until the client
    disconnects, at which point the session is torn down.
    """
    if not session_id or session_id not in self.sessions:
        return Result.default_user_error(info="Invalid or expired session ID.", exec_code=404)

    session = self.sessions[session_id]
    queue = session.event_queue

    async def event_generator() -> AsyncGenerator[dict[str, Any], None]:
        # Runs for the lifetime of the SSE connection, pumping queued events.
        self.app.logger.info(f"SSE stream opened for session {session_id}")
        await queue.put({"event": "connection_ready", "data": "Stream connected successfully."})
        try:
            while True:
                event_data = await queue.get()
                yield event_data
                queue.task_done()
        except asyncio.CancelledError:
            # Client closed the connection; fall through to cleanup.
            self.app.logger.info(f"SSE stream for session {session_id} cancelled by client.")
        finally:
            # The SSE stream owns the session: once the stream closes,
            # cancel any in-flight agent task and drop the session.
            if session_id in self.sessions:
                if self.sessions[session_id].agent_task and not self.sessions[session_id].agent_task.done():
                    self.sessions[session_id].agent_task.cancel()
                del self.sessions[session_id]
                self.app.logger.info(f"Cleaned up and closed session {session_id}.")

    return Result.sse(stream_generator=event_generator())

api_process_audio(self, request, form_data) async

Receives audio, transcribes it, and starts the agent processing task.

Source code in toolboxv2/mods/talk.py
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
@export(mod_name=MOD_NAME, api=True, name="process_audio", api_methods=['POST'], request_as_kwarg=True)
async def api_process_audio(self: Tools, request: RequestData, form_data: dict) -> Result:
    """Receives audio, transcribes it, and starts the agent processing task."""
    # Guard clauses: STT available, known session, not busy, audio present.
    if not self.stt_func:
        return Result.default_internal_error(info="Speech-to-text service is not available.")

    sid = form_data.get('session_id')
    blob = form_data.get('audio_blob')

    if not sid or sid not in self.sessions:
        return Result.default_user_error(info="Invalid or missing session_id.", exec_code=400)

    talk_session = self.sessions[sid]

    if talk_session.agent_task and not talk_session.agent_task.done():
        return Result.default_user_error(info="Already processing a previous request.", exec_code=429)

    if not blob or 'content_base64' not in blob:
        return Result.default_user_error(info="Audio data is missing or in the wrong format.", exec_code=400)

    try:
        raw_audio = base64.b64decode(blob['content_base64'])
        text = self.stt_func(raw_audio).get('text', '').strip()

        if not text:
            # Empty transcription: notify the UI via SSE but still return OK.
            await talk_session.event_queue.put({"event": "error", "data": "Could not understand audio. Please try again."})
            return Result.ok(data={"message": "Transcription was empty."})

        await talk_session.event_queue.put({"event": "transcription_update", "data": text})

        voice_params = {
            "voice_index": int(form_data.get('voice_index', '0')),
            "provider": form_data.get('provider', 'piper'),
            "model_name": form_data.get('model_name', 'ryan')
        }

        # Start the background task; the request returns immediately.
        talk_session.agent_task = asyncio.create_task(
            _run_agent_and_respond(self, talk_session, text, voice_params)
        )
        return Result.ok(data={"message": "Audio received and processing started."})

    except Exception as e:
        self.app.logger.error(f"Error processing audio for session {sid}: {e}", exc_info=True)
        return Result.default_internal_error(info=f"Failed to process audio: {str(e)}")

api_start_session(self, request) async

Creates a new talk session for an authenticated user.

Source code in toolboxv2/mods/talk.py
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
@export(mod_name=MOD_NAME, api=True, name="start_session", api_methods=['POST'], request_as_kwarg=True)
async def api_start_session(self: Tools, request: RequestData) -> Result:
    """Creates a new talk session for an authenticated user."""
    uid = await _get_user_uid(self.app, request)
    if not uid:
        return Result.default_user_error(info="User authentication required.", exec_code=401)

    if not self.isaa_mod:
        return Result.default_internal_error(info="ISAA module is not available.")

    # Each talk session wraps an ISAA ChatSession holding the history.
    new_session = TalkSession(
        user_id=uid,
        chat_session=ChatSession(mem=self.isaa_mod.get_memory()),
    )
    self.sessions[new_session.session_id] = new_session

    self.app.logger.info(f"Started new talk session {new_session.session_id} for user {uid}")
    return Result.json(data={"session_id": new_session.session_id})

get_main_ui(self, request)

Serves the main HTML and JavaScript UI for the Talk widget.

Source code in toolboxv2/mods/talk.py
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
@export(mod_name=MOD_NAME, name="ui", api=True, api_methods=['GET'], request_as_kwarg=True)
def get_main_ui(self: Tools, request: RequestData) -> Result:
    """Serves the main HTML and JavaScript UI for the Talk widget.

    The page is a self-contained HTML document: a circular audio visualizer,
    a mic toggle button, a voice selector, and inline JS that talks to the
    start_session / stream (SSE) / process_audio endpoints of this module.
    """
    # NOTE: the HTML below is served verbatim; do not edit casually.
    html_content = """
<!DOCTYPE html>
<html lang="en" data-theme="light">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>ToolBoxV2 - Voice Assistant</title>
    <link rel="stylesheet" href="https://fonts.googleapis.com/css2?family=Material+Symbols+Outlined:opsz,wght,FILL,GRAD@20..48,100..700,0..1,-50..200" />
    <style>
        body { font-family: sans-serif; background-color: var(--theme-bg); color: var(--theme-text); display: flex; justify-content: center; align-items: center; min-height: 100vh; margin: 0; }
        .container { display: flex; flex-direction: column; align-items: center; justify-content: center; width: 100%; max-width: 600px; padding: 20px; text-align: center; }
        .visualizer { width: 250px; height: 250px; background-color: var(--glass-bg); border-radius: 50%; position: relative; overflow: hidden; border: 3px solid var(--theme-border); box-shadow: inset 0 0 15px rgba(0,0,0,0.2); transition: border-color 0.3s, box-shadow 0.3s; }
        .visualizer.recording { border-color: #ef4444; }
        .visualizer.thinking { border-color: #3b82f6; animation: pulse 2s infinite; }
        .visualizer.speaking { border-color: #22c55e; }
        .particle { position: absolute; width: 8px; height: 8px; background-color: var(--theme-primary); border-radius: 50%; pointer-events: none; transition: all 0.1s; }
        #micButton { margin-top: 30px; width: 80px; height: 80px; border-radius: 50%; border: none; background-color: var(--theme-primary); color: white; cursor: pointer; display: flex; justify-content: center; align-items: center; box-shadow: 0 4px 10px rgba(0,0,0,0.2); transition: background-color 0.2s, transform 0.1s; }
        #micButton:active { transform: scale(0.95); }
        #micButton:disabled { background-color: #9ca3af; cursor: not-allowed; }
        #micButton .material-symbols-outlined { font-size: 40px; }
        #statusText { margin-top: 20px; min-height: 50px; font-size: 1.2em; color: var(--theme-text-muted); line-height: 1.5; }
        @keyframes pulse { 0% { box-shadow: inset 0 0 15px rgba(0,0,0,0.2), 0 0 0 0 rgba(59, 130, 246, 0.7); } 70% { box-shadow: inset 0 0 15px rgba(0,0,0,0.2), 0 0 0 15px rgba(59, 130, 246, 0); } 100% { box-shadow: inset 0 0 15px rgba(0,0,0,0.2), 0 0 0 0 rgba(59, 130, 246, 0); } }
    </style>
</head>
<body>
    <div class="container">
        <div class="visualizer" id="visualizer"></div>
        <p id="statusText">Press the microphone to start</p>
        <button id="micButton"><span class="material-symbols-outlined">hourglass_empty</span></button>
        <div class="options" style="margin-top: 20px;">
            <label for="voiceSelect">Voice:</label>
            <select id="voiceSelect">
                <option value='{"provider": "piper", "model_name": "ryan", "voice_index": 0}'>Ryan (EN)</option>
                <option value='{"provider": "piper", "model_name": "kathleen", "voice_index": 0}'>Kathleen (EN)</option>
                <option value='{"provider": "piper", "model_name": "karlsson", "voice_index": 0}'>Karlsson (DE)</option>
            </select>
        </div>
    </div>
    <script unSave="true">
    function initTalk() {
        const visualizer = document.getElementById('visualizer');
        const micButton = document.getElementById('micButton');
        const statusText = document.getElementById('statusText');
        const voiceSelect = document.getElementById('voiceSelect');

        const state = { sessionId: null, sseConnection: null, mediaRecorder: null, audioChunks: [], isRecording: false, isProcessing: false, currentAudio: null };
        let audioContext, analyser, particles = [];

        function setStatus(text, mode = 'idle') {
            statusText.textContent = text;
            visualizer.className = 'visualizer ' + mode;
        }

        function createParticles(num = 50) {
            visualizer.innerHTML = ''; particles = [];
            for (let i = 0; i < num; i++) {
                const p = document.createElement('div'); p.classList.add('particle');
                visualizer.appendChild(p);
                particles.push({ element: p, angle: Math.random() * Math.PI * 2, radius: 50 + Math.random() * 50, speed: 0.01 + Math.random() * 0.02 });
            }
        }

        function animateVisualizer() {
            if (analyser) {
                const dataArray = new Uint8Array(analyser.frequencyBinCount);
                analyser.getByteFrequencyData(dataArray);
                let average = dataArray.reduce((a, b) => a + b, 0) / dataArray.length;
                particles.forEach(p => {
                    p.angle += p.speed;
                    const scale = 1 + (average / 128);
                    p.element.style.transform = `translate(${Math.cos(p.angle) * p.radius * scale}px, ${Math.sin(p.angle) * p.radius * scale}px)`;
                });
            }
            requestAnimationFrame(animateVisualizer);
        }

        async function startSession() {
            if (state.sessionId) return;
            setStatus("Connecting...", 'thinking');
            micButton.disabled = true;
            try {
                const response = await TB.api.request('talk', 'start_session', {}, 'POST');
                if (response.error === 'none' && response.get()?.session_id) {
                    state.sessionId = response.get().session_id;
                    connectSse();
                } else {
                    setStatus(response.info?.help_text || "Failed to start session.", 'error');
                }
            } catch (e) {
                setStatus("Connection error.", 'error');
            }
        }

        function connectSse() {
            if (!state.sessionId) return;
            state.sseConnection = TB.sse.connect(`/sse/talk/stream?session_id=${state.sessionId}`, {
                onOpen: () => console.log("SSE Stream Open"),
                onError: () => setStatus("Connection lost.", 'error'),
                listeners: {
                    'connection_ready': (data) => { setStatus("Press the microphone to start"); micButton.disabled = false; micButton.innerHTML = '<span class="material-symbols-outlined">mic</span>'; },
                    'transcription_update': (data) => { setStatus(`“${data}”`, 'thinking'); state.isProcessing = true; },
                    'agent_thought': (data) => setStatus(data, 'thinking'),
                    'agent_response_chunk': (data) => { if (statusText.textContent.startsWith('“')) statusText.textContent = ""; statusText.textContent += data; },
                    'audio_playback': (data) => playAudio(data.content, data.format),
                    'processing_complete': (data) => { state.isProcessing = false; setStatus(data); micButton.disabled = false; micButton.innerHTML = '<span class="material-symbols-outlined">mic</span>'; },
                    'error': (data) => { state.isProcessing = false; setStatus(data, 'error'); micButton.disabled = false; micButton.innerHTML = '<span class="material-symbols-outlined">mic</span>'; }
                }
            });
        }

        async function playAudio(base64, format) {
            setStatus("...", 'speaking');
            const blob = await (await fetch(`data:${format};base64,${base64}`)).blob();
            const url = URL.createObjectURL(blob);
            if (state.currentAudio) state.currentAudio.pause();
            state.currentAudio = new Audio(url);

            if (!audioContext) audioContext = new AudioContext();
            const source = audioContext.createMediaElementSource(state.currentAudio);
            if (!analyser) { analyser = audioContext.createAnalyser(); analyser.fftSize = 64; }
            source.connect(analyser);
            analyser.connect(audioContext.destination);

            state.currentAudio.play();
            state.currentAudio.onended = () => { setStatus("Finished speaking."); URL.revokeObjectURL(url); };
        }

        async function toggleRecording() {
            if (state.isProcessing) return;
            if (!state.sessionId) { await startSession(); return; }

            if (state.isRecording) {
                state.mediaRecorder.stop();
                micButton.disabled = true;
                micButton.innerHTML = '<span class="material-symbols-outlined">hourglass_top</span>';
                setStatus("Processing...", 'thinking');
            } else {
                if (!state.mediaRecorder) {
                    try {
                        const stream = await navigator.mediaDevices.getUserMedia({ audio: { sampleRate: 16000, channelCount: 1 } });
                        if (!audioContext) audioContext = new AudioContext();
                        const source = audioContext.createMediaStreamSource(stream);
                        if (!analyser) { analyser = audioContext.createAnalyser(); analyser.fftSize = 64; }
                        source.connect(analyser);

                        state.mediaRecorder = new MediaRecorder(stream, { mimeType: 'audio/webm;codecs=opus' });
                        state.mediaRecorder.ondataavailable = e => state.audioChunks.push(e.data);
                        state.mediaRecorder.onstop = uploadAudio;
                    } catch (e) { setStatus("Could not access microphone.", 'error'); return; }
                }
                state.audioChunks = []; state.mediaRecorder.start(); state.isRecording = true;
                setStatus("Listening...", 'recording');
                micButton.innerHTML = '<span class="material-symbols-outlined">stop_circle</span>';
            }
        }

        async function uploadAudio() {
            state.isRecording = false; state.isProcessing = true;
            if (state.audioChunks.length === 0) { setStatus("No audio recorded."); state.isProcessing = false; micButton.disabled = false; micButton.innerHTML = '<span class="material-symbols-outlined">mic</span>'; return; }
            const audioBlob = new Blob(state.audioChunks, { type: 'audio/webm;codecs=opus' });

            const formData = new FormData();
            formData.append('session_id', state.sessionId);
            formData.append('audio_blob', audioBlob, 'recording.webm');

            const voiceParams = JSON.parse(voiceSelect.value);
            for (const key in voiceParams) formData.append(key, voiceParams[key]);

            try {
                const response = await TB.api.request('talk', 'process_audio', formData, 'POST');
                if (response.error !== 'none') {
                    setStatus(response.info?.help_text || "Failed to process audio.", 'error');
                    state.isProcessing = false; micButton.disabled = false; micButton.innerHTML = '<span class="material-symbols-outlined">mic</span>';
                }
            } catch(e) {
                 setStatus("Error sending audio.", 'error'); state.isProcessing = false; micButton.disabled = false; micButton.innerHTML = '<span class="material-symbols-outlined">mic</span>';
            }
        }

        micButton.addEventListener('click', toggleRecording);
        createParticles(); animateVisualizer();
        if (window.TB.isInitialized) startSession(); else window.TB.events.on('tbjs:initialized', startSession, { once: true });
    }
if (window.TB?.events) {
    if (window.TB.config?.get('appRootId')) { // A sign that TB.init might have run
         initTalk();
    } else {
        window.TB.events.on('tbjs:initialized', initTalk, { once: true });
    }
} else {
    // Fallback if TB is not even an object yet, very early load
    document.addEventListener('tbjs:initialized', initTalk, { once: true }); // Custom event dispatch from TB.init
}

    </script>
</body>
</html>"""
    # Served as a full HTML document (no row=True: framework wraps/serves as page).
    return Result.html(data=html_content)

toolboxv2.flows_dict(s='.py', remote=False, dir_path=None, flows_dict_=None)

Source code in toolboxv2/flows/__init__.py
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
def flows_dict(s='.py', remote=False, dir_path=None, flows_dict_=None):
    """Collect flow entry points into a dict mapping flow NAME -> run callable.

    Scans ``dir_path`` (defaulting to this package's directory, after first
    recursing into any comma-separated paths from the ``EXTERNAL_PATH_RUNNABLE``
    environment variable) for Python files whose name contains ``s``.  Each
    module that exposes a callable ``run`` and a ``NAME`` attribute is
    registered.  With ``remote=True``, ``.gist`` stub files are also loaded
    via ``GistLoader``.

    Args:
        s: Substring a file name must contain to be considered (default '.py').
        remote: When True, additionally load matching ``.gist`` stubs.
        dir_path: Directory to scan; ``None`` means this package's directory.
        flows_dict_: Existing dict to extend in place; a new dict when ``None``.

    Returns:
        dict: Mapping of each flow's ``NAME`` to its ``run`` callable.
    """
    if flows_dict_ is None:
        flows_dict_ = {}
    with Spinner("Loading flows"):
        # Resolve the directory to scan; fold in external paths first so
        # local definitions can override same-named external flows.
        if dir_path is None:
            for ex_path in os.getenv("EXTERNAL_PATH_RUNNABLE", '').split(','):
                if not ex_path:
                    continue
                flows_dict(s, remote, ex_path, flows_dict_)
            dir_path = os.path.dirname(os.path.realpath(__file__))
        to = time.perf_counter()
        # Iterate over every file in the directory.
        files = os.listdir(dir_path)
        l_files = len(files)
        for i, file_name in enumerate(files):
            with Spinner(f"{file_name} {i}/{l_files}"):
                if file_name == "__init__.py":
                    pass

                elif remote and s in file_name and file_name.endswith('.gist'):
                    # Gist stub name encodes "<name>.<publisher>.<gist-id>.gist";
                    # assumes the last dotted segment is the gist id — TODO confirm.
                    name_f = os.path.splitext(file_name)[0]
                    name = name_f.split('.')[0]
                    url = name_f.split('.')[-1]
                    print(f"Gist Name: {name}, URL: {url}")
                    try:
                        module = GistLoader(f"{name}/{url}").load_module(name)
                    except Exception as e:
                        print(f"Error loading module {name} from github {url}")
                        print(e)
                        continue

                    # Use getattr: the original `callable(module.run)` raised
                    # AttributeError for modules without a `run` attribute.
                    run = getattr(module, 'run', None)
                    print(f"{run is not None} and {callable(run)} and {hasattr(module, 'NAME')}")
                    if callable(run) and hasattr(module, 'NAME'):
                        flows_dict_[module.NAME] = run
                elif file_name.endswith('.py') and s in file_name:
                    name = os.path.splitext(file_name)[0]
                    # Load the module directly from its file path.
                    spec = importlib.util.spec_from_file_location(name, os.path.join(dir_path, file_name))
                    module = importlib.util.module_from_spec(spec)
                    try:
                        spec.loader.exec_module(module)
                    except Exception:
                        print("Error loading module ", name)
                        import traceback
                        traceback.print_exc()
                        continue

                    # Register the module's entry point, guarding `run` lookup.
                    run = getattr(module, 'run', None)
                    if callable(run) and hasattr(module, 'NAME'):
                        flows_dict_[module.NAME] = run

        print(f"Getting all flows took {time.perf_counter() - to:.2f} for {len(flows_dict_.keys())} elements")
        return flows_dict_

toolboxv2.TBEF

Automatically generated by ToolBox v = 0.1.22

Other Exposed Items

toolboxv2.ToolBox_over = 'root' module-attribute